diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 94325d02e..000000000 --- a/.coveragerc +++ /dev/null @@ -1,9 +0,0 @@ -[run] -branch = True -source = senlin -omit = senlin/tests/*,senlin/hacking/* -concurrency = greenlet - -[report] -ignore_errors = True - diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 00ae09a3c..000000000 --- a/.gitignore +++ /dev/null @@ -1,30 +0,0 @@ -AUTHORS -ChangeLog -build -cover -cover-master -dist -doc/source/_static/senlin.policy.yaml.sample -etc/senlin/policy.yaml.sample -etc/senlin/senlin.conf.sample -releasenodes/build -senlin-test.db -senlin.sqlite -tags -*~ -*.eggs -*.egg-info -*.iml -*.log -*.pyc -*.swp -*.swo -.coverage -.coverage.* -.idea -.project -.pydevproject -.tox -.venv -.DS_Store -.stestr diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index 92f7ddfb9..000000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=${OS_TEST_PATH:-./senlin/tests/unit} -top_dir=./ diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index 458a626d8..000000000 --- a/.zuul.yaml +++ /dev/null @@ -1,180 +0,0 @@ -- project: - queue: senlin - templates: - - check-requirements - - openstack-python3-jobs - - publish-openstack-docs-pti - - release-notes-jobs-python3 - check: - jobs: - - senlin-dsvm-tempest-py3-api - - senlin-dsvm-tempest-py3-api-sqlalchemy-2x - - senlin-tempest-api-ipv6-only - - senlin-dsvm-tempest-py3-functional - - senlin-dsvm-tempest-py3-functional-sqlalchemy-2x - - senlin-dsvm-tempest-py3-integration - - senlin-dsvm-tempest-py3-integration-zaqar: - voting: false - - openstack-tox-cover: - voting: false - gate: - jobs: - - senlin-dsvm-tempest-py3-api - - senlin-tempest-api-ipv6-only - - senlin-dsvm-tempest-py3-functional - -- job: - name: senlin-tempest-base - parent: devstack-tempest - description: Senlin Devstack tempest base job - timeout: 7800 - required-projects: &base_required_projects - - openstack/senlin - - openstack/senlin-tempest-plugin - irrelevant-files: &base_irrelevant_files - - ^.*\.rst$ - - ^api-ref/.*$ - - ^doc/.*$ - - ^releasenotes/.*$ - vars: &base_vars - tox_envlist: all - devstack_services: - tempest: true - devstack_plugins: - senlin: https://opendev.org/openstack/senlin - devstack_localrc: - TEMPEST_PLUGINS: '/opt/stack/senlin-tempest-plugin' - USE_PYTHON3: true - devstack_local_conf: - test-config: - $TEMPEST_CONFIG: - clustering: - min_microversion: 1.12 - max_microversion: 1.12 - delete_with_dependency: True - health_policy_version: '1.1' - -- job: - name: senlin-dsvm-tempest-py3-api - parent: senlin-tempest-base - vars: - tempest_test_regex: senlin_tempest_plugin.tests.api - devstack_localrc: - USE_PYTHON3: true - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - cloud_backend: openstack_test - -- job: - name: senlin-dsvm-tempest-py3-api-sqlalchemy-2x - parent: senlin-tempest-base - required-projects: - - name: openstack/oslo.db - vars: - tempest_test_regex: senlin_tempest_plugin.tests.api - devstack_localrc: - USE_PYTHON3: true - USE_SQLALCHEMY_LATEST: true - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - cloud_backend: openstack_test - -- job: - name: senlin-dsvm-tempest-py3-functional - parent: senlin-tempest-base - vars: - tempest_test_regex: senlin_tempest_plugin.tests.functional - devstack_localrc: - USE_PYTHON3: true - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - cloud_backend: openstack_test - health_check_interval_min: 10 - -- job: - name: 
senlin-dsvm-tempest-py3-functional-sqlalchemy-2x - parent: senlin-tempest-base - required-projects: - - name: openstack/oslo.db - vars: - tempest_test_regex: senlin_tempest_plugin.tests.functional - devstack_localrc: - USE_PYTHON3: true - USE_SQLALCHEMY_LATEST: true - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - cloud_backend: openstack_test - health_check_interval_min: 10 - -- job: - name: senlin-dsvm-tempest-py3-integration - parent: senlin-tempest-base - vars: - tempest_test_regex: senlin_tempest_plugin.tests.integration(?!\.test_nova_server_cluster.TestNovaServerCluster).* - devstack_plugins: - heat: https://opendev.org/openstack/heat - devstack_localrc: - USE_PYTHON3: true - TEMPEST_PLUGINS: '"/opt/stack/senlin-tempest-plugin /opt/stack/zaqar-tempest-plugin"' - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - health_check_interval_min: 10 - required-projects: - - openstack/heat - - openstack/octavia - - openstack/python-zaqarclient - - openstack/senlin - - openstack/senlin-tempest-plugin - - openstack/zaqar-tempest-plugin - -- job: - name: senlin-dsvm-tempest-py3-integration-zaqar - parent: senlin-tempest-base - vars: - tempest_test_regex: senlin_tempest_plugin.tests.integration.test_nova_server_cluster.TestNovaServerCluster - devstack_plugins: - zaqar: https://opendev.org/openstack/zaqar - heat: https://opendev.org/openstack/heat - devstack_localrc: - USE_PYTHON3: true - TEMPEST_PLUGINS: '"/opt/stack/senlin-tempest-plugin /opt/stack/zaqar-tempest-plugin"' - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - health_check_interval_min: 10 - required-projects: - - openstack/heat - - openstack/python-zaqarclient - - openstack/senlin - - openstack/senlin-tempest-plugin - - openstack/zaqar - - openstack/zaqar-ui - - openstack/zaqar-tempest-plugin - -- job: - name: senlin-tempest-api-ipv6-only - parent: devstack-tempest-ipv6 - description: | - Senlin devstack tempest tests job for IPv6-only deployment - irrelevant-files: *base_irrelevant_files - required-projects: *base_required_projects - timeout: 7800 - vars: - <<: *base_vars - tempest_test_regex: senlin_tempest_plugin.tests.api - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - cloud_backend: openstack_test diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 3925fd52f..000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,91 +0,0 @@ -Before You Start -================ - -If you would like to contribute to the development of OpenStack, you must follow the steps on this page: - - https://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: - - https://docs.openstack.org/infra/manual/developers.html#development-workflow - - -Where to Start -============== - -There are many ways to start your contribution. - -Sign on a bug to fix -------------------- - -Bugs related to Senlin are reported and tracked on the individual sites on Launchpad: - -- Senlin Server: https://bugs.launchpad.net/senlin -- Senlin Client: https://bugs.launchpad.net/python-senlinclient -- Senlin Dashboard: https://bugs.launchpad.net/senlin-dashboard - -You can pick up any bug that has not yet been assigned and work on it. Each bug fix patch should be accompanied by a release note.
- - -Pick a TODO item ---------------- - -The Senlin team maintains a ``TODO.rst`` file under the root directory, where you can add new items, claim existing items and remove items that are completed. Once you have picked an item to work on, the expected workflow is: - -#. Propose a patch to remove the item from the ``TODO.rst`` file. -#. Add an item to the `etherpad page`_ which the core team uses to track the progress of individual work items. -#. Start working on the item and keep updating your progress on the `etherpad page`_, e.g. paste the patch review link to the page. -#. Mark the item from the `etherpad page`_ as completed when the patches are all merged. - - -Start a Bigger Effort --------------------- - -The Senlin team also maintains a ``FEATURES.rst`` file under the root directory, where you can add new items by proposing a patch to the file or claim an item to work on. However, the work items in the ``FEATURES.rst`` file are all non-trivial and thus demand a deeper discussion before being worked on. The expected workflow for these items is: - -#. Propose a spec file to the ``doc/specs`` directory describing the detailed design and other options, if any. -#. Work with the reviewers to polish the design until it is accepted. -#. Propose blueprint(s) to track the progress of the work item by registering them at the `blueprint page`_. -#. Start working on the blueprints and checking in patches. Each patch should have a ``partial-blueprint: <blueprint-id>`` tag in its commit message. -#. For each blueprint, add an item to the `etherpad page`_ so that it can be closely tracked in weekly meetings. -#. Mark the blueprint(s) as completed when all related patches are merged. -#. Propose a patch to the ``FEATURES.rst`` file to remove the work item. -#. Propose a separate release note patch for the new feature. - - -Reporting Bugs -============== - -Bugs should be filed on the Launchpad sites: - -- Senlin Server: https://bugs.launchpad.net/senlin -- Senlin Client: https://bugs.launchpad.net/python-senlinclient -- Senlin Dashboard: https://bugs.launchpad.net/senlin-dashboard - - -Meet the Developers -=================== - -Real-time communication among developers is mostly done via IRC. The team is using the #senlin channel on oftc.net. - -.. _`etherpad page`: https://etherpad.openstack.org/p/senlin-newton-workitems -.. _`blueprint page`: https://blueprints.launchpad.net/senlin diff --git a/FEATURES.rst b/FEATURES.rst deleted file mode 100644 index d94331d05..000000000 --- a/FEATURES.rst +++ /dev/null @@ -1,284 +0,0 @@ -Senlin Feature Request Pipeline -=============================== - -This document records the feature requests the developer team has received and considered. This document SHOULD NOT be treated as a replacement for the blueprints (or specs), which are already accompanied by a design. The feature requests here are meant to be a pipeline for mid-term goals that Senlin should strive to achieve. Whenever a feature can be implemented with a practical design, the feature should be moved to a blueprint (and/or specs) review. - -This document SHOULD NOT be treated as a replacement for the `TODO` file the development team is maintaining. The `TODO` file records actionable work items that can be picked up by any developer who is willing to do it, while this document records more general requirements that need at least a draft design before being worked on.
- - -High Priority -~~~~~~~~~~~~~ - -TOSCA support ------------- - -Provide TOSCA support in Senlin (maybe reuse heat-translator/tosca-parser?) - - -Advanced Container Clustering ----------------------------- - -Container cluster management: - -- Scheduling -- Networking/Storage -- APIs/Operations -- Security issues -- Dependencies - - -Better Versioning for Profile/Policy ------------------------------------ - -Profile/Policy schemas can vary over time as properties are added or deprecated. Versioning support is important for keeping backward compatibility when profiles/policies evolve. - - -Role-specific Profiles ---------------------- - -There is a need for nodes of the same role to share a common profile while nodes of different roles have different profiles. The pre-condition for this is that the profile-types match. - - -Scavenger Process ----------------- - -Senlin needs a scavenger process that runs as a background daemon. It is tasked with cleansing the database of old data, e.g. event records. Its behavior must be customizable because users may want the old records to be removed or to be archived in a certain way. - - -Fault Tolerance --------------- - -Senlin in most cases will be managing clusters with nodes distributed somewhere. One problem inherent in such a distributed architecture is partial failures, communication latencies, concurrency, consistency, etc. Hardware/software failures are to be expected. Senlin must remain operational in the face of such failures. - - -Scaling to Existing Nodes ------------------------- - -[Conclusion from Austin: https://etherpad.openstack.org/p/newton-senlin-as] - -Senlin can improve the scale-out operation so that it can add existing nodes to a cluster when doing scale-out. We do not intend to scale to nodes not created by Senlin. - - -Adoption of Nodes ----------------- - -There have been requirements for adopting existing resources (e.g. nova servers) to be managed by Senlin. - - -Middle Priority -~~~~~~~~~~~~~~~ - -Access Control -------------- - -Currently, all access to Senlin objects, like clusters and profiles, is project_safe by default. This prevents a user from manipulating resources that belong to other users. However, sharing resources between different users/projects with limited privileges (e.g. read-only, read-write) is also a very reasonable demand in many cases. Therefore, we may need to provide access permission control in Senlin to support this kind of requirement. - - -Blue-Green Deployment --------------------- - -Support to deploy environments using the blue-green deployment pattern. http://martinfowler.com/bliki/BlueGreenDeployment.html - - -Multi-cloud Support ------------------- - -In some cases, users may need to create/scale clusters across different clouds. Therefore, Senlin is supposed to have the ability to manage nodes that span multiple clouds within the same cluster. Support from both the profile and policy layers is necessary for providing this ability. - - -Customizable Batch Processing ----------------------------- - -An important non-functional requirement for Senlin is the scale of clusters it can handle. We will strive to make it handle large-scale ones; however, that means we need to improve DB accesses under heavy loads. One potential tradeoff is to introduce an option for users to customize the size of batches when large numbers of DB requests pour in.
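As a rough illustration of the batching idea above, the sketch below throttles a stream of DB-bound operations into fixed-size batches. This is only a sketch: the ``batch_size`` and ``pause`` knobs and the helper itself are hypothetical, not part of Senlin::

   # Hypothetical sketch of customizable batch processing; none of
   # these names exist in Senlin itself.
   import itertools
   import time

   def run_in_batches(operations, batch_size=50, pause=0.5):
       """Run callables in fixed-size batches to smooth out DB load."""
       ops = iter(operations)
       while True:
           batch = list(itertools.islice(ops, batch_size))
           if not batch:
               break
           for op in batch:
               op()           # each call would issue one DB request
           time.sleep(pause)  # back off before the next batch

   # Usage: run_in_batches([lambda: None] * 200, batch_size=20)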
- - -Support to Bare-metal --------------------- - -Managing a bare-metal cluster is a very common user requirement. It is reasonable for Senlin to support it by talking to services like Ironic. - - -Improve health scheduling ------------------------- -The scheduling of which engine handles which cluster's health registries can be improved. For example: 1. When the first engine starts, it runs all health registries. 2. When another engine starts, it can send a broadcast message that carries its handling capacity and offers to take over some health registries. (A sketch of this idea appears at the end of this document.) - - -Host Fencing Support -------------------- -To ensure a seemingly dead node is actually dead, all HA solutions need a way to kill a node for sure. Senlin is no exception here. We already have support for force-deleting a VM instance; what is needed is a mechanism to kill a failed host. - - -LB HealthMonitor based failure detection ---------------------------------------- -Ideally, Senlin could rely on the LBaaS service for node failure detection rather than reinventing the wheel. However, LBaaS (Octavia) has not fixed the obvious bug. Another option is to have LBaaS emit events when node failures are detected. This proposal has failed to find its way upstream. When the upstream project (Octavia) has such features, we can enable them from the Senlin side. - - -Low Priority -~~~~~~~~~~~~ - -User Defined Actions -------------------- - -Actions in Senlin are mostly built-in ones at present. There are requirements to incorporate shell scripts and/or other structured software configuration tools into the whole picture. One of the options is to provide an easy way for Senlin to work with Ansible, for example. - - -Use Barbican to Store Secrets ----------------------------- - -Currently, Senlin uses the `cryptography` package for data encryption and decryption. There should be support for users to store credentials using the Barbican service, in addition to the current solution. - - -Use VPNaaS to Build Cross-Region/Cross-Cloud -------------------------------------------- - -When building clusters that span more than one region or cloud, there are requirements to place all cluster nodes on the same VPN so that workloads can be distributed to the nodes as if they were on the same network. - - -Vertical Scaling ---------------- - -Though Senlin is mainly concerned with horizontal scaling (in/out) support, there are possibilities/requirements to scale nodes in the vertical direction. Vertical scaling means automatically adding compute/storage/network resources to cluster nodes. Depending on the support from the corresponding services, this could be explored. - - -Replace Green Threads with Python Threading ------------------------------------------- - -Senlin is now using green threads (eventlet) for async executions. The eventlet execution model does not make efficient use of multi-processor platforms. Senlin needs a scalable execution engine, so native multi-threading is needed. - - -Metrics Collection ------------------ - -Senlin needs to support metric collection about the clusters and nodes it manages. These metrics should be collectible by the ceilometer service, for example. - - -AWS Compatible API ------------------ - -There are requirements for Senlin to provide an AWS compatible API layer so that existing workloads can be deployed to Senlin and AWS without needing to change a lot of code or configurations. - - -Integration with Mistral ------------------------ - -There are cases where the (automated) operations on clusters and nodes form a workflow. For example, an event triggers some actions to be executed in sequence and those actions in turn trigger other actions to be executed. - - -Support to Suspend/Resume Operations ------------------------------------ - -A user may want to suspend/resume a cluster or an individual node. Senlin needs to provide a generic definition of 'suspend' and 'resume'. It needs to be aware of whether the profile and the driver support such operations. - - -Interaction with Congress ------------------------- - -This is of low priority because Senlin needs a notification mechanism in place before it can talk to Congress. The reason to interact with Congress is that there could be enterprise-level policy enforcement that Senlin has to comply with. - - -Investigation of Tooz --------------------- - -There is a requirement to manage multiple senlin-engine instances in a distributed way. Alternatively, we can use a variant of DLM to manage cluster membership, e.g. use Redis/ZooKeeper to build clusters in their sense so that we may receive a notification when the cluster membership changes. This would be helpful for cluster health management. - -Tooz is the promised focal point in this field, generalizing the many backends that we don't want to care about. This TODO item is about two things: - -#. Does Tooz provide a reliable membership management infrastructure? -#. Is there a comparison between ZooKeeper and Redis, for example? - - -Support to Scheduled Actions ---------------------------- - -This is a request to trigger some actions at a specified time. One typical use case is to scale up a cluster before a weekend or promotion season in preparation for the coming burst of workloads. - - -Dynamic Plugin Loading ---------------------- - -Design and implement a dynamic plugin loading mechanism that allows loading plugins from arbitrary paths.
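To make the ``Improve health scheduling`` idea above a bit more concrete, here is a minimal sketch of capacity-weighted redistribution of health registries among engines. All names are hypothetical; no existing Senlin or oslo API is implied::

   # Hypothetical sketch: hand out health registries according to the
   # capacity each engine announced in its broadcast message.
   def rebalance(registries, capacities):
       """Map engine id -> list of registries, weighted by capacity."""
       if not capacities:
           return {}
       total = sum(capacities.values()) or 1
       assignment, start = {}, 0
       for engine_id, capacity in capacities.items():
           share = round(len(registries) * capacity / total)
           assignment[engine_id] = registries[start:start + share]
           start += share
       # rounding leftovers stay with the first engine
       first = next(iter(assignment))
       assignment[first] += registries[start:]
       return assignment

   # Usage: rebalance(['r1', 'r2', 'r3'], {'engine-a': 2, 'engine-b': 1})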
- - - diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 3b1bbc165..000000000 --- a/HACKING.rst +++ /dev/null @@ -1,57 +0,0 @@ -Senlin Style Commandments ========================= - -- Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ -- Step 2: Read on - -Senlin Specific Commandments ---------------------------- - -- [S318] Use assertion ``assertIsNone(A)`` instead of ``assertEqual(A, None)`` or ``assertEqual(None, A)``. -- [S319] Use ``jsonutils`` functions rather than using the ``json`` package directly. -- [S320] Default arguments of a method should not be mutable. -- [S321] The api_version decorator has to be the first decorator on a method. -- [S322] LOG.warn is deprecated. Enforce use of LOG.warning. -- [S323] Use assertTrue(...) rather than assertEqual(True, ...). - -Working on APIs --------------- - -If you are proposing new APIs or fixes to existing APIs, please spend some time reading the guidelines published by the API Working Group: - -https://opendev.org/openstack/api-sig/src/branch/master/guidelines - -Any work on improving Senlin's APIs to conform to the guidelines is welcome. - -Creating Unit Tests ------------------- - -For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. When submitting a patch for a bug that lacks a unit test, a new unit test should be added. If a submitted bug fix does come with a unit test, be sure it fails without the patch and passes with the patch.
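A minimal sketch of what such a test could look like, using plain ``unittest`` for brevity (real Senlin unit tests live under ``senlin/tests/unit``; the function under test here is made up for illustration)::

   # Sketch only: clamp_desired_capacity is a hypothetical helper,
   # not a real Senlin function.
   import unittest

   def clamp_desired_capacity(desired, min_size, max_size):
       """Clamp a cluster's desired capacity into [min_size, max_size]."""
       return max(min_size, min(desired, max_size))

   class TestClampDesiredCapacity(unittest.TestCase):
       def test_within_bounds(self):
           self.assertEqual(3, clamp_desired_capacity(3, 1, 5))

       def test_clamped_to_max(self):
           self.assertEqual(5, clamp_desired_capacity(9, 1, 5))

   if __name__ == '__main__':
       unittest.main()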
For more information on creating and running unit tests, please read senlin/doc/source/contributor/testing.rst. Test guide online link: https://docs.openstack.org/senlin/latest/contributor/testing.html - - -Running Tests ------------- - -The testing system is based on a combination of `tox` and `testr`. The canonical approach to running tests is to simply run the command `tox`. This will create virtual environments, populate them with dependencies and run all of the tests that OpenStack CI systems run. - -Behind the scenes, `tox` is running `ostestr --slowest`, but is set up such that you can supply any additional arguments to the `ostestr` command. For example, the following command makes `tox` tell `ostestr` to add `--analyze-isolation` to its argument list:: - - tox -- --analyze-isolation diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a09..000000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof.
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README.rst b/README.rst index 7b6988033..4ee2c5f13 100644 --- a/README.rst +++ b/README.rst @@ -1,96 +1,10 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. - -.. image:: https://governance.openstack.org/tc/badges/senlin.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". - -.. Change things from this point on - -Senlin ====== - --------- -Overview --------- - -Senlin is a clustering service for OpenStack clouds. It creates and operates clusters of homogeneous objects exposed by other OpenStack services. The goal is to make the orchestration of collections of similar objects easier. - -Senlin provides RESTful APIs to users so that they can associate various policies with a cluster. Sample policies include placement policy, load balancing policy, health policy, scaling policy, update policy and so on. - -Senlin is designed to be capable of managing different types of objects. An object's lifecycle is managed using profile type implementations, which are themselves plugins. - ---------- -For Users ---------- - -If you want to install Senlin to try it out, please refer to the documents under the ``doc/source/user/`` subdirectory. User guide online link: https://docs.openstack.org/senlin/latest/#user-references - --------------- -For Developers -------------- - -There are many ways to help improve the software, for example, filing a bug, submitting or reviewing a patch, writing or reviewing some documents. There are documents under the ``doc/source/contributor`` subdirectory.
Developer guide online link: https://docs.openstack.org/senlin/latest/#developer-s-guide - ---------- -Resources ---------- - -Launchpad Projects ------------------ -- Server: https://launchpad.net/senlin -- Client: https://launchpad.net/python-senlinclient -- Dashboard: https://launchpad.net/senlin-dashboard -- Tempest Plugin: https://launchpad.net/senlin-tempest-plugin - -Code Repository --------------- -- Server: https://opendev.org/openstack/senlin -- Client: https://opendev.org/openstack/python-senlinclient -- Dashboard: https://opendev.org/openstack/senlin-dashboard -- Tempest Plugin: https://opendev.org/openstack/senlin-tempest-plugin - -Blueprints ---------- -- Blueprints: https://blueprints.launchpad.net/senlin - -Bug Tracking ------------ -- Server Bugs: https://bugs.launchpad.net/senlin -- Client Bugs: https://bugs.launchpad.net/python-senlinclient -- Dashboard Bugs: https://bugs.launchpad.net/senlin-dashboard -- Tempest Plugin Bugs: https://bugs.launchpad.net/senlin-tempest-plugin - -Weekly Meetings --------------- -- Schedule: every Tuesday at 1300 UTC, on the #openstack-meeting channel -- Agenda: https://wiki.openstack.org/wiki/Meetings/SenlinAgenda -- Archive: http://eavesdrop.openstack.org/meetings/senlin/2015/ - -IRC --- -IRC Channel: #senlin on `OFTC`_. - -Mailing list ------------ -The project uses http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss as its mailing list. Please use the tag ``[Senlin]`` in the subject for new threads. - - .. _OFTC: https://oftc.net/ - -Release notes ------------- -- Release notes: https://docs.openstack.org/releasenotes/senlin/ diff --git a/TODO.rst b/TODO.rst deleted file mode 100644 index 57ae80d7b..000000000 --- a/TODO.rst +++ /dev/null @@ -1,98 +0,0 @@ -Senlin TODO Item List ===================== -This document records all work items the team wants to finish in the short term (usually a development cycle, which lasts six months). None of the jobs listed here is in progress, which means developers can pick up any work item they are interested in if they have enough time to work on it. A developer should file a blueprint on Launchpad giving a detailed description of their plan after deciding to work on a specific item. A patch should be proposed as well to remove the related work item from the TODO list after the blueprint gets approved. - - -HIGH PRIORITY ============= - -API --- - - Find and fill gaps with the API-WG guidelines besides the ones we have already identified. - - - Add support to put a cluster into maintenance mode - -ENGINE ------ - - Complete support for the list of health recovery actions. - - - Add a command "node adopt --profile-type <type> --properties network.id=<net-id> --resource <server-id>" to adopt an existing server node. * The new command should check if the provided properties are sufficient. * There exists a need to snapshot a server before adoption. - - -MIDDLE PRIORITY =============== - -API --- - - Support advanced filters as suggested by the API WG: `Filtering Guidelines`_ - -ENGINE ------ - - Add a new property "fast_scaling" to Cluster. * A standby (user-invisible) cluster is created containing the extra nodes that amount to max_size - desired_capacity - - Perform cluster scaling based on role filters - - Perform cluster checking based on role filters - - Perform cluster recovery based on role filters - -PROFILE ------- - - Add support for snapshot/restore operations for the nova server profile. The possible use case is rapid scaling.
- Add support to the nova server profile so that "block_device_mapping_v2" can reference an existing pool of cinder volumes. - Add support to the nova server profile so that "network" can reference an existing pool of neutron ports or fixed IPs. - -POLICY ------ - - Provide support for watching all objects we created on behalf of users, like the load balancer that is created when attaching an lb policy. - - Leverage other monitoring services for object health status monitoring. - - Health policy extension for recovery action selection based on inputs - -CLIENT ------ - - Provide role-based filtering when doing 'cluster-run' - -LOW PRIORITY ============ - -ENGINE ------ - - Allow actions to be paused and resumed. This is important for some background actions such as health checking. - - Provide support for oslo.notification and allow nodes to receive and react to those notifications accordingly: `Autoscaling Notifications`_ - -PROFILE ------- - - Support disk property update for the os.nova.server profile - -DOC --- - - Provide a sample conf file for customizing senlin options. - -TEST ---- - - Add more Rally profile and scenario support for Senlin. - -OTHERS ------ - - Integration with Glare for profile/policy specs storage. At least we may want to enable users to retrieve/reference heat templates from glare when creating profiles. - - -.. _`Filtering Guidelines`: https://specs.openstack.org/openstack/api-wg/guidelines/pagination_filter_sort.html#filtering -.. _`Autoscaling Notifications`: https://ask.openstack.org/en/question/46495/heat-autoscaling-adaptation-actions-on-existing-servers/ diff --git a/api-ref/source/actions.inc b/api-ref/source/actions.inc deleted file mode 100644 index 072e262b5..000000000 --- a/api-ref/source/actions.inc +++ /dev/null @@ -1,188 +0,0 @@ -======= -Actions ======= - -Lists all actions and shows details for an action. - - -List actions ============ - -.. rest_method:: GET /v1/actions - -Lists all actions. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------ - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - marker: marker - - sort: sort - - global_project: global_project - - name: name_query - - target: target_query - - action: action_action_query - - status: action_status_query - -The sorting keys include ``name``, ``target``, ``action``, ``created_at`` and ``status``. - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - actions: actions - - action: action_action - - cause: cause - - created_at: created_at - - data: action_data - - depended_by: depended_by - - depends_on: depends_on - - start_time: start_time - - end_time: end_time - - id: action_id - - inputs: inputs - - interval: interval - - name: name - - outputs: outputs - - owner: action_owner - - project: project - - status: action_status - - status_reason: status_reason - - target: action_target - - timeout: action_timeout - - updated_at: updated_at - - user: user - -Response Example ---------------- - -.. literalinclude:: samples/actions-list-response.json - :language: javascript
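As a quick illustration of the listing call above, the endpoint can be exercised with plain HTTP. This is a sketch only: the endpoint URL and token are placeholders to adapt to a real deployment, and the microversion value is just an example::

   # Sketch: list Senlin actions through the documented REST API.
   # SENLIN_ENDPOINT and TOKEN are placeholders, not real values.
   import requests

   SENLIN_ENDPOINT = 'http://senlin.example.com/v1'
   TOKEN = 'a-valid-keystone-token'

   resp = requests.get(
       SENLIN_ENDPOINT + '/actions',
       headers={
           'X-Auth-Token': TOKEN,
           # opt in to a microversion, as described above
           'OpenStack-API-Version': 'clustering 1.12',
       },
       params={'limit': 10, 'sort': 'created_at:desc'},
   )
   resp.raise_for_status()
   for action in resp.json()['actions']:
       print(action['id'], action['action'], action['status'])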
Show action details =================== - -.. rest_method:: GET /v1/actions/{action_id} - -Shows details for an action. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------ - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - action_id: action_id_url - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - action: action_action - - cause: cause - - created_at: created_at - - data: action_data - - depended_by: depended_by - - depends_on: depends_on - - start_time: start_time - - end_time: end_time - - id: action_id - - inputs: inputs - - interval: interval - - name: name - - outputs: outputs - - owner: action_owner - - project: project - - status: action_status - - status_reason: status_reason - - target: action_target - - timeout: action_timeout - - updated_at: updated_at - - user: user - -Response Example ---------------- - -.. literalinclude:: samples/action-get-response.json - :language: javascript - -Update action ============= - -.. rest_method:: PATCH /v1/actions/{action_id} - - min_version: 1.12 - -Update the status of an action. - -This API is only available since API microversion 1.12. - -Request Parameters ------------------ - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - action_id: action_id_url - - action: action - - status: action_status_update - - force: action_update_force_query - -Request Example --------------- - -.. literalinclude:: samples/action-get-request.json - :language: javascript - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 409 - - 503 diff --git a/api-ref/source/build_info.inc b/api-ref/source/build_info.inc deleted file mode 100644 index fc3c649e6..000000000 --- a/api-ref/source/build_info.inc +++ /dev/null @@ -1,50 +0,0 @@ -============================== -Build information (build-info) ============================== - -Shows build information for a Senlin deployment. - -Show build information ====================== - -.. rest_method:: GET /v1/build-info - -Shows build information for a Senlin deployment. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 503 - -Request Parameters ------------------ - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - build_info: build_info - - api: build_info_api - - engine: build_info_engine - -Response Example ---------------- - -.. literalinclude:: samples/build-show-response.json - :language: javascript - -This operation does not accept a request body. diff --git a/api-ref/source/cluster_policies.inc b/api-ref/source/cluster_policies.inc deleted file mode 100644 index 9ec738138..000000000 --- a/api-ref/source/cluster_policies.inc +++ /dev/null @@ -1,113 +0,0 @@ -=================================== -Cluster Policies (cluster-policies) =================================== - -Lists all cluster policies and shows information for a cluster policy. - -List all cluster policies ========================= - -.. rest_method:: GET /v1/clusters/{cluster_id}/policies - -Lists all policies attached to a specific cluster. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -..
rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - enabled: enabled_query - - policy_name: name_query - - policy_type: type_query - - sort: sort - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - cluster_policies: cluster_policies - - cluster_id: cluster_id - - cluster_name: cluster_name - - enabled: cluster_policy_enabled - - id: cluster_policy_id - - policy_id: policy_id - - policy_name: policy_name - - policy_type: policy_type_name - -Response Example ----------------- - -.. literalinclude:: samples/cluster-policies-list-response.json - :language: javascript - - -Show cluster_policy details -=========================== - -.. rest_method:: GET /v1/clusters/{cluster_id}/policies/{policy_id} - -Shows details for a cluster policy. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - policy_id: policy_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - cluster_policy: cluster_policy - - cluster_id: cluster_id - - cluster_name: cluster_name - - enabled: cluster_policy_enabled - - id: cluster_policy_id - - policy_id: policy_id - - policy_name: policy_name - - policy_type: policy_type_name - -Response Example ----------------- - -.. literalinclude:: samples/cluster-policy-show-response.json - :language: javascript diff --git a/api-ref/source/clusters.inc b/api-ref/source/clusters.inc deleted file mode 100644 index 3fa2a46f9..000000000 --- a/api-ref/source/clusters.inc +++ /dev/null @@ -1,1158 +0,0 @@ -======== -Clusters -======== - -Lists all clusters and creates, shows information for, updates, deletes, and -triggers an action on a cluster. - - -List clusters -============= - -.. rest_method:: GET /v1/clusters - -Lists clusters. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - marker: marker - - sort: sort - - global_project: global_project - - name: name_query - - status: status_query - -The sorting keys include ``name``, ``status``, ``init_at``, ``created_at`` -and ``updated_at``. - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - clusters: clusters - - created_at: created_at - - config: cluster_config - - data: cluster_data - - dependents: dependents - - desired_capacity: desired_capacity - - domain: domain - - id: cluster_id - - init_at: init_at - - max_size: max_size - - metadata: metadata - - min_size: min_size - - name: name - - nodes: cluster_nodes - - policies: cluster_policies_property - - profile_id: profile_id - - profile_name: profile_name - - project: project - - status: cluster_status - - status_reason: status_reason - - timeout: timeout - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. 
literalinclude:: samples/clusters-list-response.json - :language: javascript - - -Create cluster -============== - -.. rest_method:: POST /v1/clusters - -Creates a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 500 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - config: cluster_config_req - - cluster: cluster - - name: cluster_name - - desired_capacity: desired_capacity - - profile_id: profile_identity_req - - min_size: min_size_req - - timeout: timeout_req - - max_size: max_size_req - - metadata: metadata_req - -Request Example ---------------- - -.. literalinclude:: samples/cluster-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - cluster: cluster - - config: cluster_config - - created_at: created_at - - data: cluster_data - - dependents: dependents - - desired_capacity: desired_capacity - - domain: domain - - id: cluster_id - - init_at: init_at - - max_size: max_size - - metadata: metadata - - min_size: min_size - - name: name - - nodes: cluster_nodes - - policies: cluster_policies_property - - profile_id: profile_id - - profile_name: profile_name - - project: project - - status: cluster_status - - status_reason: status_reason - - timeout: timeout - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/cluster-create-response.json - :language: javascript - - -Show cluster details -==================== - -.. rest_method:: GET /v1/clusters/{cluster_id} - -Shows details for a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - cluster: cluster - - config: cluster_config - - created_at: created_at - - data: cluster_data - - dependents: dependents - - desired_capacity: desired_capacity - - domain: domain - - id: cluster_id - - init_at: init_at - - max_size: max_size - - metadata: metadata - - min_size: min_size - - name: name - - nodes: cluster_nodes - - policies: cluster_policies_property - - profile_id: profile_id - - profile_name: profile_name - - project: project - - status: cluster_status - - status_reason: status_reason - - timeout: timeout - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/cluster-show-response.json - :language: javascript - - -Update cluster -============== - -.. rest_method:: PATCH /v1/clusters/{cluster_id} - -Updates a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. 
rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - cluster: cluster - - config: cluster_config_req - - name: name_req - - profile_id: profile_identity - - timeout: timeout_req - - metadata: metadata_req - - profile_only: profile_only - -Request Example ---------------- - -.. literalinclude:: samples/cluster-update-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - cluster: cluster - - config: cluster_config - - created_at: created_at - - data: cluster_data - - dependents: dependents - - desired_capacity: desired_capacity - - domain: domain - - id: cluster_id - - init_at: init_at - - max_size: max_size - - metadata: metadata - - min_size: min_size - - name: name - - nodes: cluster_nodes - - policies: cluster_policies_property - - profile_id: profile_id - - profile_name: profile_name - - project: project - - status: cluster_status - - status_reason: status_reason - - timeout: timeout - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/cluster-update-response.json - :language: javascript - - -Delete cluster -============== - -.. rest_method:: DELETE /v1/clusters/{cluster_id} - -Deletes a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 409 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - -Resize a Cluster -================ - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Resize a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - adjustment_type: adjustment_type - - number: adjustment_number - - min_size: adjustment_min_size - - max_size: adjustment_max_size - - min_step: adjustment_min_step - - strict: adjustment_strict - - -The ``action_name`` in the request body has to be ``resize``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-resize-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Scale-in a Cluster -=================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Shrink the size of a cluster by a given number. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 409 - - 503 - -Request Parameters ------------------- - -.. 
rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - count: scale_count - - -The ``action_name`` in the request body has to be ``scale_in``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-scale-in-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Scale-out a Cluster -=================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Expand the size of a cluster by a given number. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 409 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - count: scale_count - - -The ``action_name`` in the request body has to be ``scale_out``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-scale-out-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Add nodes to a Cluster -====================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Add the specified list of nodes to the cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - nodes: cluster_member_nodes - - -The ``action_name`` in the request body has to be ``add_nodes``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-add-nodes-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Remove nodes from a Cluster -=========================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Remove the specified list of nodes from the cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - nodes: cluster_member_nodes - - destroy_after_deletion: destroy_after_deletion - - -The ``action_name`` in the request body has to be ``del_nodes``. - -Request Example ---------------- - -.. 
literalinclude:: samples/cluster-del-nodes-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Replace nodes in a Cluster -=========================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Replace the specified nodes in a cluster. - -This API is only available since API microversion 1.3. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - nodes: cluster_replace_nodes - - -The ``action_name`` in the request body has to be ``replace_nodes``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-replace-nodes-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Attach a Policy to a Cluster -============================ - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Attach the specified policy to the cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - policy_id: policy_identity - - enabled: cluster_policy_enabled - -The ``action_name`` in the request body has to be ``policy_attach``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-attach-policy-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Detach a Policy from a Cluster -============================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Detach the specified policy from the cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - policy_id: policy_identity - -The ``action_name`` in the request body has to be ``policy_detach``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-detach-policy-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. 
literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Update a Policy on a Cluster -============================ - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Update the specified policy on the cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - policy_id: policy_identity - - enabled: cluster_policy_enabled - -The ``action_name`` in the request body has to be ``update_policy``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-update-policy-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Collect Attributes Across a Cluster -=================================== - -.. rest_method:: GET /v1/clusters/{cluster_id}/attrs/{path} - -Aggregate an attribute value across all nodes in a cluster. - -This API is only available since API microversion 1.2. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - path: path_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - cluster_attributes: cluster_attributes - - id: node_id - - value: attr_value - -Check a Cluster's Health Status -=============================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Check the health status of all nodes in a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - params: check_params - -The ``action_name`` in the request body has to be ``check``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-check-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Recover a Cluster to a Healthy Status -===================================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Recover the health status for all nodes in a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. 
rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - params: recover_params - -The ``action_name`` in the body must be ``recover``. The valid parameters -include: - -- ``operation``: A string specifying the action to be performed for node - recovery. - -- ``operation_params``: An optional dictionary specifying the key-value - arguments for the specific node recovery action. - -- ``check``: A boolean specifying whether the engine should check the actual - statuses of cluster nodes before performing the recovery action. This - parameter was added in microversion 1.6 and defaults to False. - -- ``check_capacity``: A boolean specifying whether to check the current number - of nodes against the ``desired_capacity`` field. Nodes will be deleted if - the number of nodes is larger than ``desired_capacity``; otherwise, new - nodes will be created. This parameter was added in microversion 1.7 and - defaults to False. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-recover-request.json - :language: javascript - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Perform an Operation on a Cluster -================================= - -.. rest_method:: POST /v1/clusters/{cluster_id}/ops - -Perform an operation on the specified cluster. The specified operation and its -associated parameters must validate against the profile type of the cluster. - -This API is only available since API microversion 1.4. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - operation: cluster_operation_request - -Request Example ---------------- - -.. literalinclude:: samples/cluster-operation-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Complete Lifecycle on a Cluster Action -====================================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Complete a lifecycle action and trigger the deletion of nodes. - -This API is only available since API microversion 1.9. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - lifecycle_action_token: lifecycle_token_id - -The ``action_name`` in the body must be ``complete_lifecycle``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-complete-lifecycle-request.json - :language: javascript - -Response Parameters -------------------- - -..
rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py deleted file mode 100644 index 4cf19aaba..000000000 --- a/api-ref/source/conf.py +++ /dev/null @@ -1,205 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# senlin documentation build configuration file, created by -# sphinx-quickstart on Sat May 1 15:17:47 2010. -# -# This file is execfile()d with the current directory set to -# its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import sys - -extensions = [ - 'os_api_ref', - 'openstackdocstheme', -] - -html_theme = 'openstackdocs' -html_theme_options = { - "sidebar_mode": "toc", -} - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -copyright = u'2015-present, OpenStack Foundation' - -# openstackdocstheme options -repository_name = 'openstack/senlin' -bug_project = 'senlin' -bug_tag = 'api-ref' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# The reST default role (used for this markup: `text`) to use -# for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. 
-pygments_style = 'sphinx' - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'senlindoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'Senlin.tex', u'OpenStack Clustering API Documentation', - u'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. 
-# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True diff --git a/api-ref/source/events.inc b/api-ref/source/events.inc deleted file mode 100644 index e2b9a7d65..000000000 --- a/api-ref/source/events.inc +++ /dev/null @@ -1,129 +0,0 @@ -=============== -Events (events) -=============== - -Lists all events and shows information for an event. - -List events -=========== - -.. rest_method:: GET /v1/events - -Lists all events. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - level: event_level_req - - marker: marker - - sort: sort - - global_project: global_project - - oid: oid_query - - otype: otype_query - - oname: oname_query - - cluster_id: cluster_identity_query - - action: action_name_query - -The sorting keys include ``timestamp``, ``level``, ``otype``, ``oname``, -``action``, ``status``, ``oid`` and ``cluster_id``. - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - events: events - - action: action_name - - cluster_id: cluster_id - - id: event_id - - level: event_level - - oid: oid - - oname: oname - - otype: otype - - project: project - - status: event_status - - status_reason: status_reason - - timestamp: event_timestamp - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/events-list-response.json - :language: javascript - - -Shows event details -=================== - -.. rest_method:: GET /v1/events/{event_id} - -Shows details for an event. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - event_id: event_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - event: event - - action: action_name - - cluster_id: cluster_id - - id: event_id - - level: event_level - - oid: oid - - oname: oname - - otype: otype - - project: project - - status: event_status - - status_reason: status_reason - - timestamp: event_timestamp - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/event-show-response.json - :language: javascript diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst deleted file mode 100644 index 09da4260f..000000000 --- a/api-ref/source/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -:tocdepth: 2 - -============== -Clustering API -============== - -.. rest_expand_all:: - -.. include:: versions.inc -.. include:: build_info.inc -.. include:: profile_types.inc -.. include:: profiles.inc -.. include:: policy_types.inc -.. include:: policies.inc -.. include:: clusters.inc -.. include:: cluster_policies.inc -.. include:: nodes.inc -.. include:: receivers.inc -.. include:: events.inc -.. include:: webhooks.inc -.. include:: actions.inc -.. 
include:: services.inc diff --git a/api-ref/source/nodes.inc b/api-ref/source/nodes.inc deleted file mode 100644 index 1e5a03865..000000000 --- a/api-ref/source/nodes.inc +++ /dev/null @@ -1,654 +0,0 @@ -===== -Nodes -===== - -Lists all nodes, and creates, shows information for, updates, deletes a node. - - -List nodes -========== - -.. rest_method:: GET /v1/nodes - -Lists all nodes. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - marker: marker - - sort: sort - - global_project: global_project - - cluster_id: cluster_identity_query - - name: name_query - - status: status_query - -The sorting keys include ``name``, ``index``, ``status``, ``init_at``, -``created_at`` and ``updated_at``. - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - nodes: nodes - - cluster_id: cluster_id - - created_at: created_at - - data: node_data - - dependents: dependents - - domain: domain - - id: node_id - - index: index - - init_at: init_at - - metadata: metadata - - name: name - - physical_id: physical_id - - profile_id: profile_id - - profile_name: profile_name - - project: project - - role: role - - status: node_status - - status_reason: status_reason - - tainted: tainted - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/node-list-response.json - :language: javascript - - -Create node -=========== - -.. rest_method:: POST /v1/nodes - -Creates a node. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - node: node - - role: role_req - - profile_id: profile_identity_req - - cluster_id: node_cluster_identity - - name: node_name - - metadata: metadata_req - -Request Example ---------------- - -.. literalinclude:: samples/node-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - node: node - - cluster_id: cluster_id - - created_at: created_at - - data: node_data - - dependents: dependents - - domain: domain - - id: node_id - - index: index - - init_at: init_at - - metadata: metadata - - name: name - - physical_id: physical_id - - profile_id: profile_id - - profile_name: profile_name - - project: project - - role: role - - status: node_status - - status_reason: status_reason - - tainted: tainted - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/node-create-response.json - :language: javascript - - -Adopt node -========== - -.. rest_method:: POST /v1/nodes/adopt - - min_version: 1.7 - -Adopts a node. - -This API is only available since API microversion 1.7. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. 
rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - identity: identity - - metadata: metadata_req - - name: node_name_adopt - - overrides: overrides - - role: role_req - - snapshot: snapshot - - type: profile_type_name - -Request Example ---------------- - -.. literalinclude:: samples/node-adopt-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - node: node - - cluster_id: cluster_id - - created_at: created_at - - data: node_data - - domain: domain - - id: node_id - - index: index - - init_at: init_at - - metadata: metadata - - name: name - - physical_id: physical_id - - profile_id: profile_id - - profile_name: profile_name - - project: project - - role: role - - status: node_status - - status_reason: status_reason - - tainted: tainted - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/node-adopt-response.json - :language: javascript - - -Adopt node (preview) -==================== - -.. rest_method:: POST /v1/nodes/adopt-preview - - min_version: 1.7 - -Preview a node adoption. - -This API is only available since API microversion 1.7. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - identity: identity - - overrides: overrides - - snapshot: snapshot - - type: profile_type_name - -Request Example ---------------- - -.. literalinclude:: samples/node-adopt-preview-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - node_preview: node_preview - - cluster_id: cluster_id - - type: profile_type_name - - version: profile_type_version - - properties: profile_spec - -Response Example ----------------- - -.. literalinclude:: samples/node-adopt-preview-response.json - :language: javascript - - -Show node details -================= - -.. rest_method:: GET /v1/nodes/{node_id} - -Shows details about a node. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - node_id: node_id_url - - show_details: show_details - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - node: node - - cluster_id: cluster_id - - created_at: created_at - - data: node_data - - dependents: dependents - - domain: domain - - id: node_id - - index: index - - init_at: init_at - - metadata: metadata - - name: name - - physical_id: physical_id - - profile_id: profile_id - - profile_name: profile_name - - project: project - - role: role - - status: node_status - - status_reason: status_reason - - tainted: tainted - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/node-show-response.json - :language: javascript - - -Update node -=========== - -.. rest_method:: PATCH /v1/nodes/{node_id} - -Updates a node. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. 
rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - node_id: node_id_url - - node: node - - name: name_req - - profile_id: profile_identity - - role: role_req - - metadata: metadata_req - - tainted: tainted_req - -Request Example ---------------- - -.. literalinclude:: samples/node-update-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - node: node - - cluster_id: cluster_id - - created_at: created_at - - data: node_data - - dependents: dependents - - domain: domain - - id: node_id - - index: index - - init_at: init_at - - metadata: metadata - - name: name - - physical_id: physical_id - - profile_id: profile_id - - profile_name: profile_name - - project: project - - role: role - - status: node_status - - status_reason: status_reason - - tainted: tainted - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/node-show-response.json - :language: javascript - - -Delete node -=========== - -.. rest_method:: DELETE /v1/nodes/{node_id} - -Deletes a node. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - node_id: node_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - -Check a Node's Health -===================== - -.. rest_method:: POST /v1/nodes/{node_id}/actions - -Check the health status of the specified node. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - node_id: node_id_url - - action: action_request - -The ``action_name`` in the body must be ``check``. - -Request Example ---------------- - -.. literalinclude:: samples/node-check-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/node-action-response.json - :language: javascript - - - -Recover a Node to Healthy Status -================================ - -.. rest_method:: POST /v1/nodes/{node_id}/actions - -Recover the specified node to its healthy status. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - node_id: node_id_url - - action: action_request - -The ``action_name`` in the body must be ``recover``. The valid parameters -include: - -- ``operation``: A string specifying the action to be performed for node - recovery. - -- ``operation_params``: An optional dictionary specifying the key-value - arguments for the specific node recovery action. 
- -- ``check``: A boolean specifying whether the engine should check the node's - actual status before performing the recovery action. This parameter was - added in microversion 1.6. - - -Request Example ---------------- - -.. literalinclude:: samples/node-recover-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/node-action-response.json - :language: javascript - - -Perform an Operation on a Node -============================== - -.. rest_method:: POST /v1/nodes/{node_id}/ops - - min_version: 1.4 - -Perform the specified operation on the specified node. - -This API is only available since API microversion 1.4. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - node_id: node_id_url - - operation: operation_request - -Request Example ---------------- - -.. literalinclude:: samples/node-operation-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/node-action-response.json - :language: javascript diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml deleted file mode 100644 index 5e3060c2f..000000000 --- a/api-ref/source/parameters.yaml +++ /dev/null @@ -1,1546 +0,0 @@ -#### header parameters ####################################################### - -location: - type: string - in: header - required: True - description: | - For asynchronous object operations, the ``location`` header contains a - string that can be interpreted as a relative URI from where users can - track the progress of the action triggered. - -microversion: - type: string - in: header - description: | - API microversion request. It takes the form of - ``OpenStack-API-Version: clustering 1.0``, where ``1.0`` is the requested - API version. - -request_id: - type: string - in: header - description: | - A unique ID for tracking the service request. The request ID associated - with the request by default appears in the service logs. - -#### path parameters ######################################################### - -action_id_url: - type: string - in: path - required: True - description: | - The name, short-ID or UUID that identifies an action object. - -cluster_id_url: - type: string - in: path - required: True - description: | - The name, UUID or short-UUID of a cluster object. - -event_id_url: - type: string - in: path - required: True - description: | - The name, UUID or short-UUID of an event object. - -node_id_url: - type: string - in: path - required: True - description: | - The name, short-ID or UUID of a node object. - -path_url: - type: string - in: path - required: True - description: | - A JSON path format string for a node attribute. - -policy_id_url: - type: string - in: path - required: True - description: | - The name, UUID or short-UUID of a policy object. - -policy_type_url: - type: string - in: path - required: True - description: | - The name of a policy type.
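As a concrete illustration of the node ``recover`` parameters described above, a minimal request body might look like the following sketch (the ``reboot`` operation and its argument are assumptions about the profile type in use; the canonical example lives in ``samples/node-recover-request.json``):

.. code-block:: json

   {
     "recover": {
       "operation": "reboot",
       "operation_params": {
         "type": "soft"
       },
       "check": false
     }
   }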
- -profile_id_url: - type: string - in: path - required: True - description: | - The name, UUID or short-UUID of a profile. - -profile_type_url: - type: string - in: path - required: True - description: | - The name of a profile type. - -receiver_id_url: - type: string - in: path - required: True - description: | - The name, UUID or short-UUID of a receiver object. - -version_url: - type: string - in: path - required: True - description: | - A string indicating the major version of Clustering API. - -webhook_id_url: - type: UUID - in: path - required: True - description: | - The UUID of a webhook object. - -#### query parameters ######################################################## - -action_action_query: - type: string - in: query - description: | - Filters the resulting list using the ``action`` field of the object. - -action_name_query: - type: string - in: query - description: | - Filters the response by the action name associated with an event. - Use this filter multiple times to filter by multiple actions. - -action_status_query: - type: string - in: query - description: | - Filters the results by the ``status`` property of an action object. - -action_update_force_query: - type: boolean - in: query - description: | - A boolean indicating if the action update request should be forced. - -cluster_identity_query: - type: string - in: query - description: | - The name, short-ID or UUID of the cluster object. - -enabled_query: - type: string - in: query - description: | - Filters the response by a policy enabled status on the cluster. - -global_project: - type: boolean - in: query - default: False - description: | - Indicates whether to include resources for all projects or resources for - the current project in the response. - - If you are an administrative user and you set this value to ``true``, the - call returns all resources from all projects. Default is ``false``, which - returns only resources in the current project. - -limit: - type: integer - in: query - description: | - Requests a page size of resources. Returns a number of resources up to the - limit value. Use the `limit` parameter to make an initial limited request - and use the ID of the last-seen resource from the response as the `marker` - parameter value in a subsequent limited request. - -marker: - type: UUID - in: query - description: | - The ID of the last-seen resource. Use the `limit` parameter to make an - initial limited request and use the ID of the last-seen resource from the - response as the `marker` parameter value in a subsequent limited request. - -name_query: - type: string - in: query - description: | - Filters the response by the specified ``name`` property of the object, - such as ``policy_name`` or ``name`` property of cluster. - -oid_query: - type: string - in: query - description: | - Filters the response by the ``ID`` of the object associated with an event. - Use this filter multiple times to filter by multiple objects. - -oname_query: - type: string - in: query - description: | - Filters the response by the ``name`` of the object associated with an - event. Use this filter multiple times to filter by multiple objects. - -otype_query: - type: string - in: query - description: | - Filters the response by the ``type`` of the object associated with an - event. Use this filter multiple times to filter by multiple objects. A - valid value is ``CLUSTER`` or ``NODE``. - -receiver_action_query: - type: string - in: query - description: | - Filters the response by the action targeted by the receiver.
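The ``limit`` and ``marker`` parameters defined above combine into a simple keyset pagination scheme: issue an initial limited request, then feed the ID of the last returned resource back as the marker. For example (the UUID is illustrative)::

   GET /v1/clusters?limit=2
   GET /v1/clusters?limit=2&marker=5d6d8b13-981e-45a9-b032-6c95e48f3a14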
- -receiver_type_query: - type: string - in: query - description: | - Filters the response by the ``type`` property of the receiver. - -receiver_user_query: - type: string - in: query - description: | - Filters the response by the ``user`` property of the receiver. - min_version: 1.4 - -show_details: - type: boolean - in: query - default: False - required: False - description: | - A boolean indicating whether the detailed information about the physical - resource associated with the node object will be returned. - -sort: - type: string - in: query - description: | - Sorts the response by one or more attribute and optional sort direction - combinations. A valid direction is ``asc`` (ascending) or ``desc`` - (descending). Default direction is ``asc`` (ascending). - - Specify the list as ``<key>[:<direction>]``. - - For example, the following query parameters in the URI sort the resources - in the response by ``name`` in ascending order and then by ``status`` in - descending order:: - - GET /v1/clusters?sort=name:asc,status:desc - -status_query: - type: string - in: query - description: | - Filters the resource collection by the ``status`` property. - -target_query: - type: string - in: query - description: | - Filters the results by the UUID of the targeted object which is usually - a cluster. - -type_query: - type: string - in: query - description: | - Filters the response by the specified ``type`` property of the object, - such as ``policy_type`` property of cluster-policy binding object or - ``type`` property of policy object. - -user_query: - type: UUID - in: query - description: | - Filters the response by the ``user`` property of the resource. - -webhook_params: - type: object - in: query - description: | - The query string that forms the inputs to use for the targeted action - for API microversion less than 1.10. - -webhook_version: - type: string - in: query - required: True - description: | - The webhook implementation version requested. - - -#### body parameters ######################################################### - -action: - type: object - in: body - required: True - description: | - A structured definition of an action object. - -action_action: - type: string - in: body - required: True - description: | - A string representation of the action for execution. - -action_data: - type: object - in: body - required: True - description: | - A structured representation of data associated with an action object. - -action_id: - type: UUID - in: body - required: True - description: | - A UUID that uniquely identifies an action object. - -action_name: - type: string - in: body - required: True - description: | - The name of an action object. - -action_owner: - type: string - in: body - required: True - description: | - The UUID of the owning engine that is currently locking the action for - execution. - -action_request: - type: object - in: body - required: True - description: | - A structured definition of an action to be executed. The object is - usually expressed as:: - - <action_name>: { - <param_1>: <value_1>, - <param_2>: <value_2>, - ... - } - - The ``<action_name>`` indicates the requested action while the ``<param>`` - keys provide the associated parameters to the action. Each individual - action has its own set of parameters. - -action_status: - type: string - in: body - required: True - description: | - A string representation of the current status of the action. - -action_status_update: - type: string - in: body - required: True - description: | - A string representation of the action status to update. CANCELLED is - the only valid status at this time.
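As a concrete instance of the ``action_request`` structure defined above, a scale-out request that adds two nodes would be expressed as follows (a sketch mirroring the samples referenced in the cluster action sections; the count value is illustrative):

.. code-block:: json

   {
     "scale_out": {
       "count": 2
     }
   }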
- -action_target: - type: string - in: body - required: True - description: | - The UUID of the targeted object (which is usually a cluster). - -action_timeout: - type: integer - in: body - required: True - description: | - The number of seconds after which an unfinished action execution will be - treated as a timeout. - -actions: - type: array - in: body - required: True - description: | - A list of action objects. - -adjustment_max_size: - type: integer - in: body - description: | - The value to be set as the new ``max_size`` of the cluster. - -adjustment_min_size: - type: integer - in: body - description: | - The value to be set as the new ``min_size`` of the cluster. - -adjustment_min_step: - type: integer - in: body - description: | - When ``adjustment_type`` is set to ``CHANGE_IN_PERCENTAGE``, the computed - value is often a float which could be less than 1.0. The - ``min_step`` can be used to specify that at least this number of nodes will - be added or removed. - -adjustment_number: - type: number - in: body - description: | - The number of the adjustment. The interpretation of the value depends on - the value of the ``adjustment_type`` parameter. This parameter is mandatory - when ``adjustment_type`` is specified. Otherwise, it is optional. - - When ``adjustment_type`` is specified as ``CHANGE_IN_PERCENTAGE``, the - value of this parameter can be a float number, otherwise it has to be an - integer. - -adjustment_strict: - type: boolean - in: body - default: False - description: | - There are cases where the computed number of nodes to adjust will break - the size constraints of a cluster, i.e. its ``min_size`` or ``max_size`` - property. If this is the case, the ``strict`` parameter can further - instruct the senlin engine whether the resize should be done on a - best-effort basis. If the value is set to True, senlin engine will perform - the resize operation while respecting the cluster's size constraints. - Otherwise, if the computed adjustment will break the size constraints, the - resize request will be directly rejected. - -adjustment_type: - type: string - in: body - description: | - The type of size adjustment. The valid values are: - - - ``EXACT_CAPACITY``: The adjustment number specified is to be interpreted - as the targeted ``desired_capacity``. This value has to be a non-negative - integer. - - ``CHANGE_IN_CAPACITY``: The adjustment number specified is to be treated - as the number of nodes to add or remove. The value has to be a non-zero - integer. A positive number can be used to specify the number of nodes to - add while a negative number can be specified to indicate the number of - nodes to remove. - - ``CHANGE_IN_PERCENTAGE``: The adjustment number will be interpreted as - a percentage relative to a cluster's current ``desired_capacity``. The - adjustment number can be a positive or negative float value. - - This parameter is optional when a resize request is only about changing the - ``min_size`` and/or ``max_size`` of the cluster. Otherwise, it is required. - When this parameter is specified, the ``number`` parameter has to be - provided as well. - -attr_value: - type: object - in: body - description: | - The attribute value on a specific node. The value could be of any data - type that is valid for the attribute. - -binary: - type: string - in: body - required: True - description: | - The binary name of the service. - -build_info: - type: object - in: body - required: True - description: | - Build information for a Senlin deployment.
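To make the interplay of the adjustment parameters above concrete, the following sketch resizes a cluster by 20 percent, adding at least one node, with ``strict`` set so the engine honours the size constraints as described in ``adjustment_strict`` (all values are illustrative):

.. code-block:: json

   {
     "resize": {
       "adjustment_type": "CHANGE_IN_PERCENTAGE",
       "number": 20,
       "min_step": 1,
       "strict": true
     }
   }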
- -build_info_api: - type: object - in: body - required: True - description: | - Revision information of Senlin API service. - -build_info_engine: - type: object - in: body - required: True - description: | - Revision information of Senlin engine service. - -cause: - type: string - in: body - required: True - description: | - An explanation of why an action was started. - -check_params: - type: object - in: body - description: | - The optional parameters provided to a cluster check operation. The detailed - keys and values are not checked at the moment. - -cluster: - type: object - in: body - required: True - description: | - The structured definition of a cluster object. - -cluster_attributes: - type: array - in: body - required: True - description: | - A list of dictionaries each containing the node ID and the corresponding - attribute value. - -cluster_config: - type: object - in: body - required: True - description: | - The structured config associated with the cluster. - -cluster_config_req: - type: object - in: body - required: False - description: | - The structured config associated with the cluster. - -cluster_data: - type: object - in: body - required: True - description: | - The structured data associated with the cluster. - -cluster_id: - type: UUID - in: body - required: True - description: | - The UUID of the cluster object. - -cluster_identity: - type: UUID - in: body - required: False - description: | - The ID, short ID or name of a cluster which the adopted node is supposed - to join. - -cluster_member_nodes: - type: array - in: body - required: True - description: | - The candidate nodes to be added to or removed from a cluster. The meaning - of the parameter depends on the action requested. - - Each item in the list can be the name, the short-ID or the UUID of a node. - -cluster_name: - type: string - in: body - required: True - description: | - The name of a cluster object. The name must start with an ASCII letter - and can contain ASCII letters, digits, underscores, periods, and hyphens - and its length must be less than 255. - -cluster_nodes: - type: array - in: body - required: True - description: | - A list of the UUIDs of node objects which are members of the current - cluster. - -cluster_operation_request: - type: object - in: body - required: True - description: | - A structured definition of an operation to be performed. The object is - usually expressed as:: - - <operation_name>: { - filters: { - <key_1>: <value_1>, - <key_2>: <value_2> - } - params: { - <param_1>: <value_1>, - <param_2>: <value_2>, - ... - } - } - - The ``<operation_name>`` specifies the operation to be performed, in which - the ``filters`` object contains a collection of filtering rules, and the - ``params`` object provides the parameters (if any) to the operation. - Each individual operation has its own set of parameters, as supported by - the profile type of the target cluster. - -cluster_policies: - type: array - in: body - required: True - description: | - A list of cluster_policy objects. - -cluster_policies_property: - type: array - in: body - required: True - description: | - A list of UUIDs of the policies attached to the current cluster. - -cluster_policy: - type: object - in: body - required: True - description: | - The structured description of a cluster_policy object. - -cluster_policy_enabled: - type: boolean - in: body - required: True - description: | - Whether the policy is enabled on the attached cluster. - -cluster_policy_id: - type: UUID - in: body - required: True - description: | - The UUID of a cluster_policy object.
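A concrete ``cluster_operation_request`` following the structure above, assuming a profile type that supports a ``reboot`` operation and nodes carrying a ``role`` property (both assumptions for illustration):

.. code-block:: json

   {
     "reboot": {
       "filters": {
         "role": "slave"
       },
       "params": {
         "type": "SOFT"
       }
     }
   }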
- -cluster_replace_nodes: - type: object - in: body - required: True - description: | - A collection of key-value pairs. Each key is the node of a cluster to be - replaced, and each value is the node used to replace the original one. - - Each item of the key-value pairs can be the name, the short-ID or the - UUID of a node. - -cluster_status: - type: string - in: body - required: True - description: | - The string representation of the current status of the cluster. - -clusters: - type: array - in: body - required: True - description: | - A list of cluster objects. - -created_at: - type: string - in: body - required: True - description: | - The date and time when the object was created. The date and time stamp - format is ISO8601: ``CCYY-MM-DDThh:mm:ssZ``. For example: - ``2016-01-18T00:00:00Z`` - -depended_by: - type: array - in: body - required: True - description: | - A list of UUIDs of the actions that depend on the current action. - -dependents: - type: object - in: body - required: True - description: | - A dict containing dependency information between nova server, heat stack - cluster and container cluster. - -depends_on: - type: array - in: body - required: True - description: | - A list of UUIDs of the actions that the current action depends on. - -desired_capacity: - type: integer - in: body - required: True - description: | - The desired capacity of a cluster. When creating a cluster, this value is - set to 0 by default. - -destroy_after_deletion: - type: boolean - in: body - required: False - description: | - Whether deleted nodes should be destroyed right away. - min_version: 1.4 - -disabled_reason: - type: string - in: body - required: False - description: | - The reason for disabling a service. - -domain: - type: UUID - in: body - required: True - description: | - The ID of the domain a resource is created in. - -end_time: - type: float - in: body - required: True - description: | - A floating point number that represents when an action's execution has - completed. - -event: - type: object - in: body - required: True - description: | - The structured description of an event object. - -event_id: - type: UUID - in: body - required: True - description: | - The UUID of an event object. - -event_level: - type: string - in: body - required: True - description: | - The level of an event object. - -event_level_req: - type: string - in: body - required: False - description: | - The level of an event object. - -event_status: - type: string - in: body - required: True - description: | - The current status of the object associated with the event. - -event_timestamp: - type: string - in: body - required: True - description: | - The date and time when the event was generated. The date and time stamp - format is ISO8601: ``CCYY-MM-DDThh:mm:ssZ``. - -events: - type: array - in: body - required: True - description: | - A list of event objects. - -host: - type: string - in: body - required: True - description: | - The name of the host. - -identity: - type: string - in: body - required: True - description: | - The ID or name of the physical resource to be adopted. - -index: - type: integer - in: body - required: True - description: | - An integer that uniquely identifies a node within its owning cluster. - -init_at: - type: string - in: body - required: True - description: | - The date and time when the object was initialized. The date and - time stamp format is ISO8601: ``CCYY-MM-DDThh:mm:ssZ``.
For example: - ``2016-01-18T00:00:00Z`` - -inputs: - type: object - in: body - required: True - description: | - A collection of key-value pairs that are fed to the action as input - parameters. - -interval: - type: integer - in: body - required: True - description: | - An integer that indicates the interval in seconds between two consecutive - executions of a repeatable action. - -lifecycle_token_id: - type: UUID - in: body - required: True - description: | - The UUID of the lifecycle action to be completed. - -max_size: - type: integer - in: body - required: True - description: | - The maximum size of a cluster, i.e. the maximum number of nodes that can - be members of the cluster. A value of -1 means that the cluster doesn't - have an upper bound regarding the number of member nodes. - -max_size_req: - type: integer - default: -1 - in: body - required: False - description: | - The maximum size of a cluster, i.e. the maximum number of nodes that can - be members of the cluster. A value of -1 means that the cluster doesn't - have an upper bound regarding the number of member nodes. - -metadata: - type: object - in: body - required: True - description: | - A collection of key-value pairs associated with an object. - -metadata_req: - type: object - in: body - description: | - A collection of key-value pairs associated with an object. - -min_size: - type: integer - in: body - required: True - description: | - The minimum size of a cluster, i.e. the minimum number of nodes that can - be members of the cluster. - -min_size_req: - type: integer - default: 0 - in: body - required: False - description: | - The minimum size of a cluster, i.e. the minimum number of nodes that can - be members of the cluster. - -name: - type: string - in: body - required: True - description: - The name of the object in question. - -name_req: - type: string - in: body - required: False - description: - The new name of the object in question. - -node: - type: object - in: body - required: True - description: | - A structured description of a node object. - -node_cluster_identity: - type: string - in: body - required: False - description: | - The name, short-ID or UUID of the cluster object a node belongs to. - -node_data: - type: object - in: body - required: True - description: | - A map containing key-value pairs associated with a node object. - -node_id: - type: UUID - in: body - required: True - description: | - A UUID string that uniquely identifies a node object. - -node_name: - type: string - in: body - required: True - description: | - The name of a node object. The name must start with an ASCII letter - and can contain ASCII letters, digits, underscores, periods, and hyphens - and its length must be less than 255. - -node_name_adopt: - type: string - in: body - required: False - description: | - The name of a node object. If specified, the name must start with an ASCII - letter and can contain ASCII letters, digits, underscores, periods, and - hyphens and its length must be less than 255. - -node_preview: - type: object - in: body - required: True - description: | - A structured representation of the node to be adopted. Note this is a - preview version which only contains the spec of the profile to be created. - -node_status: - type: string - in: body - required: True - description: | - The string representation of the current status of the node object. - -nodes: - type: array - in: body - required: True - description: | - A list of node objects. 
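For reference, the ``lifecycle_action_token`` defined above is carried in a ``complete_lifecycle`` action body such as the following sketch (the UUID is illustrative):

.. code-block:: json

   {
     "complete_lifecycle": {
       "lifecycle_action_token": "976bf614-444d-4bcb-827a-15fa6cf1f5c9"
     }
   }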
- -oid: - type: UUID - in: body - required: True - description: | - The UUID of an object associated with the event. - -oname: - type: string - in: body - required: True - description: | - The name of an object associated with the event. - -operation_request: - type: object - in: body - required: True - description: | - A structured definition of an operation to be performed. The object is - usually expressed as:: - - <operation_name>: { - <param_1>: <value_1>, - <param_2>: <value_2>, - ... - } - - The ``<operation_name>`` specifies the operation to be performed while the - ``<param>`` keys provide the parameters (if any) to the operation. Each - individual operation has its own set of parameters, as supported by the - profile type of the target cluster or node. - -operations: - type: object - in: body - required: True - description: | - A dictionary containing the description of operations (and parameters) - supported by a profile type. - -otype: - type: string - in: body - required: True - description: | - The type of an object associated with the event. - -outputs: - type: object - in: body - required: True - description: | - A collection of key-value pairs that were produced during the execution of - an action as its outputs. - -overrides: - type: object - in: body - required: False - description: | - If specified, provides a collection of key-value pairs that will override - the property names and values extracted from the spec of the existing - physical node. - -physical_id: - type: UUID - in: body - required: True - description: | - The UUID of the physical resource represented by the node object. - -policies: - type: array - in: body - required: True - description: | - A list of policy objects. - -policy: - type: object - in: body - required: True - description: | - A structured description of a policy object. - -policy_data: - type: object - in: body - required: True - description: | - A structured representation of data associated with a policy object. - -policy_id: - type: UUID - in: body - required: True - description: | - The UUID of a policy object. - -policy_identity: - type: string - in: body - required: True - description: | - The name, UUID or short-UUID of a policy object. - -policy_name: - type: string - in: body - required: True - description: | - The name of a policy object. The name must start with an ASCII letter - and can contain ASCII letters, digits, underscores, periods, and hyphens - and its length must be less than 255. - -policy_spec: - type: object - in: body - required: True - description: | - The detailed specification of a policy object. - -policy_type: - type: object - in: body - required: True - description: | - A structured description of a policy type. Since API micro-version 1.5, - a "support_status" property is returned which contains a list - of support status changes. - -policy_type_name: - type: string - in: body - required: True - description: | - The name of the policy type. - -policy_type_schema: - type: object - in: body - required: True - description: | - The schema of a policy type. The schema of a policy type varies a lot - based on the specific type implementation. - -policy_types: - type: array - in: body - required: True - description: | - A list of policy_type objects. Since API micro-version 1.5, each record - in the list will have a "support_status" property which contains a list - of support status changes. - -profile: - type: object - in: body - required: True - description: | - A structured description of a profile object.
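Analogous to the cluster case, a node ``operation_request`` is keyed by the operation name; a sketch assuming the profile type supports ``reboot``:

.. code-block:: json

   {
     "reboot": {
       "type": "HARD"
     }
   }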
- -profile_id: - type: UUID - in: body - required: True - description: | - The UUID of the profile. - -profile_identity: - type: string - in: body - required: False - description: | - The name, short-ID, or UUID of a profile. - -profile_identity_req: - type: string - in: body - required: True - description: | - The name, short-ID, or UUID of a profile. - -profile_name: - type: string - in: body - required: True - description: | - The name of a profile object. The name must start with an ASCII letter - and can contain ASCII letters, digits, underscores, periods, and hyphens - and its length must be less than 255. - -profile_only: - type: boolean - in: body - required: False - description: | - Whether the profile update is limited to the target cluster. If this - parameter is set to False, all nodes in the cluster will be updated - with the specified new profile. The default value is False. - min_version: 1.6 - -profile_spec: - type: object - in: body - required: True - description: | - The detailed specification of the profile. - -profile_type: - type: object - in: body - required: True - description: | - A structured description of a profile type. Since API micro-version 1.5, - a "support_status" property is returned which contains a list - of support status changes. - -profile_type_name: - type: string - in: body - required: True - description: | - The name of the profile type. - -profile_type_schema: - type: object - in: body - required: True - description: | - The schema of a profile type. The schema of a profile type varies - considerably based on the specific type implementation. All profile types - share the ``context`` property which is a dictionary for customizing - the request context to authenticate with a backend service. A common - usage of this property is to set the ``region_name`` in the dictionary - so that a node can be created in the specified region. All other - properties are defined by a particular profile type implementation. - -profile_type_version: - type: string - in: body - required: True - description: | - The version of the profile type. - -profile_types: - type: array - in: body - required: True - description: | - A list of profile_type objects. Since API micro-version 1.5, each record - in the list will have a "support_status" property which contains a list - of support status changes. - -profiles: - type: array - in: body - required: True - description: | - A list of profile objects. - -project: - type: UUID - in: body - required: True - description: | - The ID of the project a resource is created in. - -receiver: - type: object - in: body - required: True - description: | - The structured definition of a receiver object. - -receiver_action: - type: string - in: body - description: | - The action to initiate when the receiver is triggered. A valid value - should be the name of an action that can be applied on a cluster. - -receiver_action_req: - type: string - in: body - required: False - description: | - The action to initiate when the receiver is triggered. A valid value - should be the name of an action that can be applied on a cluster. - -receiver_actor: - type: object - in: body - required: False - description: | - A map of key-value pairs to use for authentication. - -receiver_channel: - type: object - in: body - required: True - description: | - The target a user can use to trigger a receiver. For a webhook type - of receiver, the channel is a webhook URL.
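The ``context`` property described under ``profile_type_schema`` is easiest to see inside a profile spec. Below is a minimal sketch, assuming an ``os.nova.server`` profile and a placeholder region name ``RegionOne``; all keys other than ``context`` are defined by the particular profile type::

    {
        "type": "os.nova.server",
        "version": "1.0",
        "properties": {
            "context": {
                "region_name": "RegionOne"
            },
            "flavor": "m1.small",
            "image": "F20",
            "name": "test_server"
        }
    }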
- -receiver_cluster_identity: - type: string - in: body - description: | - The name, short-ID or UUID of the cluster object a node belongs to. - -receiver_id: - type: UUID - in: body - required: True - description: | - The UUID of the receiver object. - -receiver_name: - type: string - in: body - required: True - description: | - The name of a receiver object. The name must start with an ASCII letter - and can contain ASCII letters, digits, underscores, periods, and hyphens - and its length must be less than 255. - -receiver_params: - type: object - in: body - required: True - description: | - A map of key-value pairs to use for action creation. - -receiver_params_req: - type: object - in: body - required: False - description: | - A map of key-value pairs to use for action creation. Some actions - might require certain input parameters. - -receiver_type: - type: string - in: body - required: True - description: | - The type of the receiver. - -receiver_type_req: - type: string - in: body - required: True - description: | - The type of the receiver. The valid values include ``webhook`` and - ``message``. - -receivers: - type: array - in: body - required: True - description: | - A list of receiver objects. - -recover_params: - type: object - in: body - description: | - The optional parameters provided to a cluster recover operation. The - detailed keys and values are not checked at the moment. - -role: - type: string - in: body - required: True - description: | - A string describing the role played by a node inside a cluster. - -role_req: - type: string - in: body - description: | - A string describing the new role played by a node inside a cluster. - -scale_count: - type: integer - in: body - default: 1 - description: | - The number of new nodes to add to or remove from the specified cluster. - The interpretation depends on the action requested. The default value - is 1. - -service_id: - type: UUID - in: body - required: True - description: | - A UUID that uniquely identifies a service object. - -service_state: - type: string - in: body - required: True - description: | - The state of the service. One of ``up`` or ``down``. - -service_status: - type: string - in: body - required: True - description: | - The status of the service. One of ``enabled`` or ``disabled``. - -services: - type: array - in: body - required: True - description: | - A list of service objects. - -snapshot: - type: bool - in: body - required: False - description: | - A flag indicating whether a snapshot of the existing physical object should - be created before the object is adopted as a node. - -start_time: - type: float - in: body - required: True - description: | - A floating point number that represents the time when an action started - execution. - -status_reason: - type: string - in: body - required: True - description: | - The string representation of the reason why the object has transitioned to - its current status. - -tainted: - type: bool - in: body - required: True - description: | - A boolean indicating whether a node is considered tainted. Tainted nodes - are selected first during scale-in operations. This field is only - returned starting with API microversion 1.13 or greater. - -tainted_req: - type: bool - in: body - required: False - description: | - A boolean indicating whether a node is considered tainted. Tainted nodes - are selected first during scale-in operations. This parameter is only - accepted starting with API microversion 1.13 or greater.
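Because ``tainted_req`` is only accepted from API microversion 1.13 onward, a node-update request that sets it must be sent with a suitable microversion header; ``OpenStack-API-Version: clustering 1.13`` is assumed here as the minimum. A sketch of such a request body::

    {
        "node": {
            "tainted": true
        }
    }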
- -timeout: - type: integer - in: body - required: True - description: | - The default timeout value (in seconds) of cluster operations. - -timeout_req: - type: integer - in: body - required: False - description: | - The new timeout value (in seconds) of cluster operations. - -topic: - type: string - in: body - required: True - description: | - The topic name of the service. - -updated_at: - type: string - in: body - required: True - description: | - The date and time when the object was last updated. The date and time - stamp format is ISO8601: ``CCYY-MM-DDThh:mm:ssZ``. For example: - ``2016-01-18T00:00:00Z`` - -user: - type: UUID - in: body - required: True - description: | - The ID of the user an object is created by. - -version: - type: object - in: body - required: True - description: | - The details about a major API version. - -version_id: - type: string - in: body - required: True - description: | - The string representation of an API version number, e.g. ``1.0``. - -version_links: - type: array - in: body - required: True - description: | - A list of relative URLs to different version objects. - -version_max_version: - type: string - in: body - required: True - description: | - The string representation of the maximum microversion supported. - -version_media_types: - type: array - in: body - required: True - description: | - A list of supported content-type based media types. - -version_min_version: - type: string - in: body - required: True - description: | - The string representation of the minimum microversion supported. - -version_status: - type: string - in: body - required: True - description: | - A string indicating the support status of the version. - -version_updated: - type: string - in: body - required: True - description: | - The date and time when the version was last updated. The date and time - stamp format is ISO8601: ``CCYY-MM-DDThh:mm:ssZ``. For example: - ``2016-01-18T00:00:00Z`` - -versions: - type: array - in: body - required: True - description: | - A list of supported major API versions. diff --git a/api-ref/source/policies.inc b/api-ref/source/policies.inc deleted file mode 100644 index f0c28e6dd..000000000 --- a/api-ref/source/policies.inc +++ /dev/null @@ -1,358 +0,0 @@ -=================== -Policies (policies) -=================== - -Lists all policies and creates, shows information for, updates, and deletes a -policy. - - -List policies -============= - -.. rest_method:: GET /v1/policies - -Lists all policies. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------ - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - marker: marker - - sort: sort - - global_project: global_project - - name: name_query - - type: type_query - -The sorting keys include ``name``, ``type``, ``created_at`` and -``updated_at``. - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - policies: policies - - created_at: created_at - - data: policy_data - - domain: domain - - id: policy_id - - name: name - - project: project - - spec: policy_spec - - type: policy_type_name - - updated_at: updated_at - - user: user - -Response Example ---------------- - -.. literalinclude:: samples/policy-list-response.json - :language: javascript - - -Create policy -============= - -..
rest_method:: POST /v1/policies - -Creates a policy. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy: policy - - name: policy_name - - spec: policy_spec - -Request Example ---------------- - -.. literalinclude:: samples/policy-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - policy: policy - - created_at: created_at - - data: policy_data - - domain: domain - - id: policy_id - - name: name - - project: project - - spec: policy_spec - - type: policy_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/policy-create-response.json - :language: javascript - - -Show policy details -=================== - -.. rest_method:: GET /v1/policies/{policy_id} - -Shows details for a policy. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy_id: policy_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - policy: policy - - created_at: created_at - - data: policy_data - - domain: domain - - id: policy_id - - name: name - - project: project - - spec: policy_spec - - type: policy_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/policy-show-response.json - :language: javascript - - -Update policy -============= - -.. rest_method:: PATCH /v1/policies/{policy_id} - -Updates a policy. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy_id: policy_id_url - - policy: policy - - name: name - -Note that the only property that can be updated on a policy object after -creation is ``name``. - -Request Example ---------------- - -.. literalinclude:: samples/policy-update-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - policy: policy - - created_at: created_at - - data: policy_data - - domain: domain - - id: policy_id - - name: name - - project: project - - spec: policy_spec - - type: policy_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/policy-update-response.json - :language: javascript - - -Delete policy -============= - -.. rest_method:: DELETE /v1/policies/{policy_id} - -Deletes a policy. - -Response Codes --------------- - -A policy cannot be deleted if it is still attached to cluster(s). In that -case, a 409 error will be returned. - -.. rest_status_code:: success status.yaml - - - 204 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 409 - - 503 - -Request Parameters ------------------- - -.. 
rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy_id: policy_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - -Validate policy -=============== - -.. rest_method:: POST /v1/policies/validate - -Validates a policy. - -This API is only available since API microversion 1.2. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy: policy - - spec: policy_spec - -Request Example ---------------- - -.. literalinclude:: samples/policy-validate-request.json - :language: javascript - -Response Parameters -------------------- - -The response contains properties as if the policy has been created. - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - policy: policy - - created_at: created_at - - data: policy_data - - domain: domain - - id: policy_id - - name: name - - project: project - - spec: policy_spec - - type: policy_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/policy-validate-response.json - :language: javascript diff --git a/api-ref/source/policy_types.inc b/api-ref/source/policy_types.inc deleted file mode 100644 index 7b312c953..000000000 --- a/api-ref/source/policy_types.inc +++ /dev/null @@ -1,111 +0,0 @@ -=========================== -Policy Types (policy-types) -=========================== - -Lists all policy types and shows details for a policy type. - -List policy types -================= - -.. rest_method:: GET /v1/policy-types - -Lists all supported policy types. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - policy_types: policy_types - -Response Example ----------------- - -For API microversion lower than 1.5, the response only contains the name for -each policy type: - -.. literalinclude:: samples/policy-types-list-response.json - :language: javascript - -Since API microversion 1.5, the response contains the support status of each -policy type and the version is provided using a separate key: - -.. literalinclude:: samples/policy-types-list-response-v1.5.json - :language: javascript - - -Show policy type details -======================== - -.. rest_method:: GET /v1/policy-types/{policy_type} - -Shows details for a policy type. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy_type: policy_type_url - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - policy_type: policy_type - - name: policy_type_name - - schema: policy_type_schema - -Response Example ----------------- - -For API microversion lower than 1.5, the response only contains the name and -schema of the specified policy type: - -.. literalinclude:: samples/policy-type-show-response.json - :language: javascript - -Since API microversion 1.5, the response contains the support status of the -specified policy type: - -.. literalinclude:: samples/policy-type-show-response-v1.5.json - :language: javascript - diff --git a/api-ref/source/profile_types.inc b/api-ref/source/profile_types.inc deleted file mode 100644 index 2d8b2aa11..000000000 --- a/api-ref/source/profile_types.inc +++ /dev/null @@ -1,156 +0,0 @@ -============================= -Profile Types (profile-types) -============================= - -Lists all profile types and shows details for a profile type. - -List profile types -================== - -.. rest_method:: GET /v1/profile-types - -Lists supported profile types. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profile_types: profile_types - -Response Example ----------------- - -For API microversion lower than 1.5, the response only contains the name for -each profile type: - -.. literalinclude:: samples/profile-types-list-response.json - :language: javascript - -Since API microversion 1.5, the response contains the support status of each -profile type and the version is provided using a separate key: - -.. literalinclude:: samples/profile-types-list-response-v1.5.json - :language: javascript - - -Show profile type details -========================= - -.. rest_method:: GET /v1/profile-types/{profile_type} - -Shows details for a profile type. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile_type: profile_type_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profile_type: profile_type - - name: profile_type_name - - schema: profile_type_schema - -Response Example ----------------- - -For API microversion lower than 1.5, the response only contains the name and -schema of the specified profile type: - -.. literalinclude:: samples/profile-type-show-response.json - :language: javascript - -Since API microversion 1.5, the response contains the support status of the -specified profile type: - -.. literalinclude:: samples/profile-type-show-response-v1.5.json - :language: javascript - - -List profile type operations -============================ - -.. rest_method:: GET /v1/profile-types/{profile_type}/ops - -List operations and parameters supported by a profile type. - -This API is only available since API microversion 1.4. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. 
rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile_type: profile_type_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - operations: operations - -Response Example ----------------- - -.. literalinclude:: samples/profile-type-ops-response.json - :language: javascript diff --git a/api-ref/source/profiles.inc b/api-ref/source/profiles.inc deleted file mode 100644 index e7371f1dd..000000000 --- a/api-ref/source/profiles.inc +++ /dev/null @@ -1,356 +0,0 @@ -=================== -Profiles (profiles) -=================== - -Lists all profiles and creates, shows information for, updates, and deletes a -profile. - -List profiles -============= - -.. rest_method:: GET /v1/profiles - -Lists all profiles. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - global_project: global_project - - limit: limit - - marker: marker - - name: name_query - - sort: sort - - type: type_query - -The sorting keys include ``name``, ``type``, ``created_at`` and -``updated_at``. - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profiles: profiles - - created_at: created_at - - domain: domain - - id: profile_id - - metadata: metadata - - name: name - - project: project - - spec: profile_spec - - type: profile_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/profile-list-response.json - :language: javascript - - -Create profile -============== - -.. rest_method:: POST /v1/profiles - -Creates a profile. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile: profile - - name: profile_name - - metadata: metadata_req - - spec: profile_spec - -Request Example ---------------- - -.. literalinclude:: samples/profile-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profile: profile - - created_at: created_at - - domain: domain - - id: profile_id - - metadata: metadata - - name: name - - project: project - - spec: profile_spec - - type: profile_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/profile-create-response.json - :language: javascript - - -Show profile details -==================== - -.. rest_method:: GET /v1/profiles/{profile_id} - -Shows details for a profile. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile_id: profile_id_url - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - X-OpenStack-Request-Id: request_id - - profile: profile - - created_at: created_at - - domain: domain - - id: profile_id - - metadata: metadata - - name: name - - project: project - - spec: profile_spec - - type: profile_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/profile-show-response.json - :language: javascript - - -Update profile -============== - -.. rest_method:: PATCH /v1/profiles/{profile_id} - -Updates a profile. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile_id: profile_id_url - - profile: profile - - metadata: metadata_req - - name: name_req - -Request Example --------------- - -.. literalinclude:: samples/profile-update-request.json - :language: javascript - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profile: profile - - created_at: created_at - - id: profile_id - - metadata: metadata - - name: name - - project: project - - spec: profile_spec - - type: profile_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/profile-update-response.json - :language: javascript - - -Delete profile -============== - -.. rest_method:: DELETE /v1/profiles/{profile_id} - -Deletes a profile. - -Response Codes --------------- - -A profile cannot be deleted if it is still used by a node or cluster. In that -case, a 409 error will be returned. - -.. rest_status_code:: success status.yaml - - - 204 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 409 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile_id: profile_id_url - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - -Validate profile -================ - -.. rest_method:: POST /v1/profiles/validate - -Validates a profile. - -This API is only available since API microversion 1.2. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile: profile - - spec: profile_spec - -Request Example ---------------- - -.. literalinclude:: samples/profile-validate-request.json - :language: javascript - -Response Parameters ------------------- - -The response contains the same properties as would be returned had the -profile actually been created. - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profile: profile - - created_at: created_at - - domain: domain - - id: profile_id - - metadata: metadata - - name: name - - project: project - - spec: profile_spec - - type: profile_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -..
literalinclude:: samples/profile-validate-response.json - :language: javascript diff --git a/api-ref/source/receivers.inc b/api-ref/source/receivers.inc deleted file mode 100644 index bc1938490..000000000 --- a/api-ref/source/receivers.inc +++ /dev/null @@ -1,360 +0,0 @@ -===================== -Receivers (receivers) -===================== - -Lists all receivers and creates, shows information for, and deletes a receiver. - - -List receivers -============== - -.. rest_method:: GET /v1/receivers - -Lists all receivers. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - marker: marker - - sort: sort - - global_project: global_project - - name: name_query - - type: receiver_type_query - - cluster_id: cluster_identity_query - - action: receiver_action_query - - user: receiver_user_query - -The sorting keys include ``name``, ``type``, ``action``, ``cluster_id``, -``created_at`` and ``user``. - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - receivers: receivers - - action: receiver_action - - actor: receiver_actor - - channel: receiver_channel - - cluster_id: cluster_id - - created_at: created_at - - domain: domain - - id: receiver_id - - name: name - - params: receiver_params - - project: project - - type: receiver_type - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/receivers-list-response.json - :language: javascript - - -Create receiver -=============== - -.. rest_method:: POST /v1/receivers - -Creates a receiver. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 500 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - receiver: receiver - - name: receiver_name - - cluster_id: receiver_cluster_identity - - type: receiver_type_req - - action: receiver_action - - actor: receiver_actor - - params: receiver_params_req - -Request Example ---------------- - -.. literalinclude:: samples/receiver-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - receiver: receiver - - action: receiver_action - - actor: receiver_actor - - channel: receiver_channel - - cluster_id: cluster_id - - created_at: created_at - - domain: domain - - id: receiver_id - - name: name - - params: receiver_params - - project: project - - type: receiver_type - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/receiver-create-response.json - :language: javascript - - -Show receiver details -===================== - -.. rest_method:: GET /v1/receivers/{receiver_id} - -Shows details for a receiver. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - receiver_id: receiver_id_url - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - X-OpenStack-Request-Id: request_id - - receiver: receiver - - action: receiver_action - - actor: receiver_actor - - channel: receiver_channel - - cluster_id: cluster_id - - created_at: created_at - - domain: domain - - id: receiver_id - - name: name - - params: receiver_params - - project: project - - type: receiver_type - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/receiver-show-response.json - :language: javascript - - -Update receiver -================== - -.. rest_method:: PATCH /v1/receivers/{receiver_id} - - min_version: 1.7 - -Updates a receiver. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - receiver_id: receiver_id_url - - receiver: receiver - - name: name_req - - action: receiver_action_req - - params: receiver_params_req - - -Request Example ---------------- - -.. literalinclude:: samples/receiver-update-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - receiver: receiver - - action: receiver_action - - actor: receiver_actor - - channel: receiver_channel - - cluster_id: cluster_id - - created_at: created_at - - domain: domain - - id: receiver_id - - name: name - - params: receiver_params - - project: project - - type: receiver_type - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/receiver-update-response.json - :language: javascript - - -Delete receiver -=============== - -.. rest_method:: DELETE /v1/receivers/{receiver_id} - -Deletes a receiver. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 204 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -This operation does not accept a request body. - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - receiver_id: receiver_id_url - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - -This operation does not return a response body. - - -Notify receiver -=============== - -.. rest_method:: POST /v1/receivers/{receiver_id}/notify - -Notifies a message type receiver. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -This operation does not accept a request body. - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - receiver_id: receiver_id_url - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - -This operation does not return a response body.
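Tying the receiver definitions together: a webhook receiver is created with a body like the sketch below (the cluster name and the scale-out count are placeholders), and the ``channel`` returned in the response then contains the webhook URL that triggers the configured action when POSTed to::

    {
        "receiver": {
            "name": "cluster_inflate",
            "cluster_id": "test_cluster",
            "type": "webhook",
            "action": "CLUSTER_SCALE_OUT",
            "params": {
                "count": "1"
            }
        }
    }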
diff --git a/api-ref/source/samples/action-get-request.json b/api-ref/source/samples/action-get-request.json deleted file mode 100644 index c3d511d00..000000000 --- a/api-ref/source/samples/action-get-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "action": { - "status": "CANCELLED" - } -} diff --git a/api-ref/source/samples/action-get-response.json b/api-ref/source/samples/action-get-response.json deleted file mode 100644 index 56d4c90fa..000000000 --- a/api-ref/source/samples/action-get-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "action": { - "action": "CLUSTER_DELETE", - "cause": "RPC Request", - "created_at": "2015-06-27T05:09:43Z", - "data": {}, - "depended_by": [], - "depends_on": [], - "end_time": 1423570000.0, - "id": "ffbb9175-d510-4bc1-b676-c6aba2a4ca81", - "inputs": {}, - "interval": -1, - "name": "cluster_delete_fcc9b635", - "outputs": {}, - "owner": null, - "project": "f1fe61dcda2f4618a14c10dc7abc214d", - "start_time": 1423570000.0, - "status": "FAILED", - "status_reason": "Cluster action FAILED", - "target": "fcc9b635-52e3-490b-99f2-87b1640e4e89", - "timeout": 3600, - "updated_at": null, - "user": "8bcd2cdca7684c02afc9e4f2fc0f0c79" - } -} diff --git a/api-ref/source/samples/actions-list-response.json b/api-ref/source/samples/actions-list-response.json deleted file mode 100644 index 83fbb57bd..000000000 --- a/api-ref/source/samples/actions-list-response.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "actions": [ - { - "action": "NODE_CREATE", - "cause": "RPC Request", - "created_at": "2015-12-04T04:54:41Z", - "data": {}, - "depended_by": [], - "depends_on": [], - "end_time": 1425550000.0, - "id": "2366d440-c73e-4961-9254-6d1c3af7c167", - "inputs": {}, - "interval": -1, - "name": "node_create_0df0931b", - "outputs": {}, - "owner": null, - "project": "f1fe61dcda2f4618a14c10dc7abc214d", - "start_time": 1425550000.0, - "status": "SUCCEEDED", - "status_reason": "Action completed successfully.", - "target": "0df0931b-e251-4f2e-8719-4ebfda3627ba", - "timeout": 3600, - "updated_at": null, - "user": "8bcd2cdca7684c02afc9e4f2fc0f0c79" - }, - { - "action": "NODE_DELETE", - "cause": "RPC Request", - "created_at": "2015-11-04T05:21:41Z", - "data": {}, - "depended_by": [], - "depends_on": [], - "end_time": 1425550000.0, - "id": "edce3528-864f-41fb-8759-f4707925cc09", - "inputs": {}, - "interval": -1, - "name": "node_delete_f0de9b9c", - "outputs": {}, - "owner": null, - "project": "f1fe61dcda2f4618a14c10dc7abc214d", - "start_time": 1425550000.0, - "status": "SUCCEEDED", - "status_reason": "Action completed successfully.", - "target": "f0de9b9c-6d48-4a46-af21-2ca8607777fe", - "timeout": 3600, - "updated_at": null, - "user": "8bcd2cdca7684c02afc9e4f2fc0f0c79" - } - ] -} diff --git a/api-ref/source/samples/build-show-response.json b/api-ref/source/samples/build-show-response.json deleted file mode 100644 index a5ddde4c0..000000000 --- a/api-ref/source/samples/build-show-response.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "build_info": { - "api": { - "revision": "1.0" - }, - "engine": { - "revision": "2.0" - } - } -} diff --git a/api-ref/source/samples/cluster-action-response.json b/api-ref/source/samples/cluster-action-response.json deleted file mode 100644 index 1182ce558..000000000 --- a/api-ref/source/samples/cluster-action-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "action": "2a0ff107-e789-4660-a122-3816c43af703" -} \ No newline at end of file diff --git a/api-ref/source/samples/cluster-add-nodes-request.json b/api-ref/source/samples/cluster-add-nodes-request.json deleted file mode
100644 index e002fb8ac..000000000 --- a/api-ref/source/samples/cluster-add-nodes-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "add_nodes": { - "nodes": [ - "node-1234", - "node-5678" - ] - } -} diff --git a/api-ref/source/samples/cluster-attach-policy-request.json b/api-ref/source/samples/cluster-attach-policy-request.json deleted file mode 100644 index 8612d638b..000000000 --- a/api-ref/source/samples/cluster-attach-policy-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "policy_attach": { - "policy_id": "dp01", - "enabled": false - } -} diff --git a/api-ref/source/samples/cluster-attrs-list-response.json b/api-ref/source/samples/cluster-attrs-list-response.json deleted file mode 100644 index ee2f5b503..000000000 --- a/api-ref/source/samples/cluster-attrs-list-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "cluster_attributes": [ - { - "id": "28b1771d-5aaf-4692-b701-fd216b4fd9e9", - "value": "10.0.0.12" - }, - { - "id": "02db8741-03c5-466c-98a0-b83d4bb92c8c", - "value": "10.0.0.13" - }, - { - "id": "08a7eec7-0f94-4f7a-92f2-55ffb1049335", - "value": "10.0.0.14" - } - ] -} diff --git a/api-ref/source/samples/cluster-check-request.json b/api-ref/source/samples/cluster-check-request.json deleted file mode 100644 index 6d831ea62..000000000 --- a/api-ref/source/samples/cluster-check-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "check": {} -} diff --git a/api-ref/source/samples/cluster-complete-lifecycle-request.json b/api-ref/source/samples/cluster-complete-lifecycle-request.json deleted file mode 100644 index f6c152300..000000000 --- a/api-ref/source/samples/cluster-complete-lifecycle-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "complete_lifecycle": { - "lifecycle_action_token": "ffbb9175-d510-4bc1-b676-c6aba2a4ca81" - } -} diff --git a/api-ref/source/samples/cluster-create-request.json b/api-ref/source/samples/cluster-create-request.json deleted file mode 100644 index 8f8ca16c4..000000000 --- a/api-ref/source/samples/cluster-create-request.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "cluster": { - "config": {}, - "desired_capacity": 0, - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "test_cluster", - "profile_id": "mystack", - "timeout": null - } -} diff --git a/api-ref/source/samples/cluster-create-response.json b/api-ref/source/samples/cluster-create-response.json deleted file mode 100644 index ddb424488..000000000 --- a/api-ref/source/samples/cluster-create-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "cluster": { - "config": {}, - "created_at": null, - "data": {}, - "dependents": {}, - "desired_capacity": 4, - "domain": null, - "id": "45edadcb-c73b-4920-87e1-518b2f29f54b", - "init_at": "2015-02-10T14:16:10", - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "test_cluster", - "nodes": [], - "policies": [], - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "INIT", - "status_reason": "Initializing", - "timeout": 3600, - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/cluster-del-nodes-request.json b/api-ref/source/samples/cluster-del-nodes-request.json deleted file mode 100644 index ecc3ff930..000000000 --- a/api-ref/source/samples/cluster-del-nodes-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "del_nodes": { - "nodes": [ - "aff0135", - "e28a207" - ], - "destroy_after_deletion": false - } -} diff --git a/api-ref/source/samples/cluster-detach-policy-request.json 
b/api-ref/source/samples/cluster-detach-policy-request.json deleted file mode 100644 index 54b828817..000000000 --- a/api-ref/source/samples/cluster-detach-policy-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "policy_detach": { - "policy_id": "5630fb31" - } -} diff --git a/api-ref/source/samples/cluster-list-response.json b/api-ref/source/samples/cluster-list-response.json deleted file mode 100644 index 54ca5489d..000000000 --- a/api-ref/source/samples/cluster-list-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "clusters": [ - { - "created_at": "2016-05-11T07:29:04", - "data": {}, - "desired_capacity": 0, - "domain": null, - "id": "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", - "init_at": "2016-05-11T07:29:04", - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "c0", - "nodes": [], - "policies": [], - "profile_id": "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", - "profile_name": "pcirros", - "project": "eee0b7c083e84501bdd50fb269d2a10e", - "status": "ACTIVE", - "status_reason": "Cluster creation succeeded.", - "timeout": 3600, - "updated_at": null, - "user": "ab79b9647d074e46ac223a8fa297b846" - } - ] -} diff --git a/api-ref/source/samples/cluster-operation-request.json b/api-ref/source/samples/cluster-operation-request.json deleted file mode 100644 index 9b3b20ac7..000000000 --- a/api-ref/source/samples/cluster-operation-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "reboot": { - "filters": { - "role": "slave" - }, - "params": { - "type": "soft" - } - } -} diff --git a/api-ref/source/samples/cluster-policies-list-response.json b/api-ref/source/samples/cluster-policies-list-response.json deleted file mode 100644 index a34262104..000000000 --- a/api-ref/source/samples/cluster-policies-list-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "cluster_policies": [ - { - "cluster_id": "7d85f602-a948-4a30-afd4-e84f47471c15", - "cluster_name": "cluster4", - "enabled": true, - "id": "06be3a1f-b238-4a96-a737-ceec5714087e", - "policy_id": "714fe676-a08f-4196-b7af-61d52eeded15", - "policy_name": "dp01", - "policy_type": "senlin.policy.deletion-1.0" - }, - { - "cluster_id": "7d85f602-a948-4a30-afd4-e84f47471c15", - "cluster_name": "cluster4", - "enabled": true, - "id": "abddc45e-ac31-4f90-93cc-db55a7d8dd6d", - "policy_id": "e026e09f-a3e9-4dad-a1b9-d7ba316026a1", - "policy_name": "sp1", - "policy_type": "senlin.policy.scaling-1.0" - } - ] -} diff --git a/api-ref/source/samples/cluster-policy-show-response.json b/api-ref/source/samples/cluster-policy-show-response.json deleted file mode 100644 index caf05eb91..000000000 --- a/api-ref/source/samples/cluster-policy-show-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "cluster_policy": { - "cluster_id": "7d85f602-a948-4a30-afd4-e84f47471c15", - "cluster_name": "cluster4", - "enabled": true, - "id": "06be3a1f-b238-4a96-a737-ceec5714087e", - "policy_id": "714fe676-a08f-4196-b7af-61d52eeded15", - "policy_name": "dp01", - "policy_type": "senlin.policy.deletion-1.0" - } -} diff --git a/api-ref/source/samples/cluster-recover-request.json b/api-ref/source/samples/cluster-recover-request.json deleted file mode 100644 index 27d6e43a3..000000000 --- a/api-ref/source/samples/cluster-recover-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "recover": { - "operation": "reboot", - "operation_params": { - "type": "soft" - }, - "check": false - } -} diff --git a/api-ref/source/samples/cluster-replace-nodes-request.json b/api-ref/source/samples/cluster-replace-nodes-request.json deleted file mode 100644 index 5fba29167..000000000 --- 
a/api-ref/source/samples/cluster-replace-nodes-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "replace_nodes": { - "nodes": { - "node-1234": "node-5678" - } - } -} diff --git a/api-ref/source/samples/cluster-resize-request.json b/api-ref/source/samples/cluster-resize-request.json deleted file mode 100644 index 5bdf47e0c..000000000 --- a/api-ref/source/samples/cluster-resize-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "resize": { - "adjustment_type": "CHANGE_IN_CAPACITY", - "max_size": 5, - "min_size": 1, - "number": -2, - "strict": true - } -} diff --git a/api-ref/source/samples/cluster-scale-in-request.json b/api-ref/source/samples/cluster-scale-in-request.json deleted file mode 100644 index 561f3bf00..000000000 --- a/api-ref/source/samples/cluster-scale-in-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "scale_in": { - "count": 2 - } -} diff --git a/api-ref/source/samples/cluster-scale-out-request.json b/api-ref/source/samples/cluster-scale-out-request.json deleted file mode 100644 index b0c9b757e..000000000 --- a/api-ref/source/samples/cluster-scale-out-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "scale_out": { - "count": 2 - } -} diff --git a/api-ref/source/samples/cluster-show-response.json b/api-ref/source/samples/cluster-show-response.json deleted file mode 100644 index cdd056cf9..000000000 --- a/api-ref/source/samples/cluster-show-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "cluster": { - "config": {}, - "created_at": "2015-02-11T15:13:20Z", - "data": {}, - "dependents": {}, - "desired_capacity": 0, - "domain": null, - "id": "45edadcb-c73b-4920-87e1-518b2f29f54b", - "init_at": "2015-02-10T14:26:10", - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "test_cluster", - "nodes": [], - "policies": [], - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "ACTIVE", - "status_reason": "Creation succeeded", - "timeout": 3600, - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/cluster-update-policy-request.json b/api-ref/source/samples/cluster-update-policy-request.json deleted file mode 100644 index 26b3b8e12..000000000 --- a/api-ref/source/samples/cluster-update-policy-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "update_policy": { - "policy_id": "dp01", - "enabled": false - } -} diff --git a/api-ref/source/samples/cluster-update-request.json b/api-ref/source/samples/cluster-update-request.json deleted file mode 100644 index 2baf4b02b..000000000 --- a/api-ref/source/samples/cluster-update-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "cluster": { - "metadata": null, - "name": null, - "profile_id": null, - "timeout": 30, - "profile_only": true - } -} diff --git a/api-ref/source/samples/cluster-update-response.json b/api-ref/source/samples/cluster-update-response.json deleted file mode 100644 index f134242a5..000000000 --- a/api-ref/source/samples/cluster-update-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "cluster": { - "config": {}, - "created_at": "2015-02-11T15:13:20Z", - "data": {}, - "dependents": {}, - "desired_capacity": 0, - "domain": null, - "id": "45edadcb-c73b-4920-87e1-518b2f29f54b", - "init_at": "2015-02-10T14:26:10", - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "test_cluster", - "nodes": [], - "policies": [], - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "UPDATING", - 
"status_reason": "Updating", - "timeout": 3600, - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/clusters-list-response.json b/api-ref/source/samples/clusters-list-response.json deleted file mode 100644 index fb733427a..000000000 --- a/api-ref/source/samples/clusters-list-response.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "clusters": [ - { - "config": {}, - "created_at": "2015-02-10T14:26:14Z", - "data": {}, - "dependents": {}, - "desired_capacity": 4, - "domain": null, - "id": "7d85f602-a948-4a30-afd4-e84f47471c15", - "init_at": "2015-02-10T14:26:11", - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "cluster1", - "nodes": [ - "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", - "ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", - "da1e9c87-e584-4626-a120-022da5062dac" - ], - "policies": [], - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "ACTIVE", - "status_reason": "Cluster scale-in succeeded", - "timeout": 3600, - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } - ] -} diff --git a/api-ref/source/samples/event-show-response.json b/api-ref/source/samples/event-show-response.json deleted file mode 100644 index 535cabea2..000000000 --- a/api-ref/source/samples/event-show-response.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "event": { - "action": "create", - "cluster": null, - "cluster_id": null, - "id": "2d255b9c-8f36-41a2-a137-c0175ccc29c3", - "level": "20", - "meta_data": {}, - "oid": "0df0931b-e251-4f2e-8719-4ebfda3627ba", - "oname": "node009", - "otype": "NODE", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "CREATING", - "status_reason": "Initializing", - "timestamp": "2015-03-05T08:53:15Z", - "user": "a21ded6060534d99840658a777c2af5a" - } -} diff --git a/api-ref/source/samples/events-list-response.json b/api-ref/source/samples/events-list-response.json deleted file mode 100644 index 5a2bdcfb8..000000000 --- a/api-ref/source/samples/events-list-response.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "events": [ - { - "action": "create", - "cluster": null, - "cluster_id": null, - "id": "2d255b9c-8f36-41a2-a137-c0175ccc29c3", - "level": "20", - "meta_data": {}, - "oid": "0df0931b-e251-4f2e-8719-4ebfda3627ba", - "oname": "node009", - "otype": "NODE", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "CREATING", - "status_reason": "Initializing", - "timestamp": "2015-03-05T08:53:15Z", - "user": "a21ded6060534d99840658a777c2af5a" - } - ] -} diff --git a/api-ref/source/samples/node-action-response.json b/api-ref/source/samples/node-action-response.json deleted file mode 100644 index 458f57640..000000000 --- a/api-ref/source/samples/node-action-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "action": "7f760b61-7b15-4a50-af05-319922fa3229" -} \ No newline at end of file diff --git a/api-ref/source/samples/node-adopt-preview-request.json b/api-ref/source/samples/node-adopt-preview-request.json deleted file mode 100644 index 0ee9477a4..000000000 --- a/api-ref/source/samples/node-adopt-preview-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "identity": "65e27958-d6dc-4b0e-87bf-78e8f5536cbc", - "overrides": null, - "snapshot": true, - "type": "os.nova.server-1.0" -} diff --git a/api-ref/source/samples/node-adopt-preview-response.json b/api-ref/source/samples/node-adopt-preview-response.json deleted file mode 100644 index d35054712..000000000 --- a/api-ref/source/samples/node-adopt-preview-response.json +++ 
/dev/null @@ -1,20 +0,0 @@ -{ - "node_preview": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ], - "security_groups": [ - "default" - ] - }, - "type": "os.nova.server", - "version": 1.0 - } -} diff --git a/api-ref/source/samples/node-adopt-request.json b/api-ref/source/samples/node-adopt-request.json deleted file mode 100644 index 12050a700..000000000 --- a/api-ref/source/samples/node-adopt-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "identity": "65e27958-d6dc-4b0e-87bf-78e8f5536cbc", - "metadata": {}, - "name": "node009", - "overrides": null, - "role": "master", - "snapshot": true, - "type": "os.nova.server-1.0" -} diff --git a/api-ref/source/samples/node-adopt-response.json b/api-ref/source/samples/node-adopt-response.json deleted file mode 100644 index 696d5558a..000000000 --- a/api-ref/source/samples/node-adopt-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "node": { - "cluster_id": null, - "created_at": null, - "data": {}, - "domain": null, - "id": "0df0931b-e251-4f2e-8719-4ebfda3627ba", - "index": -1, - "init_at": "2015-03-05T08:53:15Z", - "metadata": {}, - "name": "node009", - "physical_id": "65e27958-d6dc-4b0e-87bf-78e8f5536cbc", - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "prof-node009", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "role": "master", - "status": "ACTIVE", - "status_reason": "Node adopted successfully", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/node-check-request.json b/api-ref/source/samples/node-check-request.json deleted file mode 100644 index 6d831ea62..000000000 --- a/api-ref/source/samples/node-check-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "check": {} -} diff --git a/api-ref/source/samples/node-create-request.json b/api-ref/source/samples/node-create-request.json deleted file mode 100644 index 55e5c3311..000000000 --- a/api-ref/source/samples/node-create-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "node": { - "cluster_id": null, - "metadata": {}, - "name": "node009", - "profile_id": "mystack", - "role": "master" - } -} diff --git a/api-ref/source/samples/node-create-response.json b/api-ref/source/samples/node-create-response.json deleted file mode 100644 index 5944459c7..000000000 --- a/api-ref/source/samples/node-create-response.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "node": { - "cluster_id": null, - "created_at": null, - "data": {}, - "dependents": {}, - "domain": null, - "id": "0df0931b-e251-4f2e-8719-4ebfda3627ba", - "index": -1, - "init_at": "2015-03-05T08:53:15Z", - "metadata": {}, - "name": "node009", - "physical_id": "", - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "role": "master", - "status": "INIT", - "status_reason": "Initializing", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/node-list-response.json b/api-ref/source/samples/node-list-response.json deleted file mode 100644 index 9cb82d8ea..000000000 --- a/api-ref/source/samples/node-list-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "nodes": [ - { - "cluster_id": "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", - "created_at": "2016-05-13T07:02:20Z", - "data": {}, - "dependents": {}, - "domain": null, - "id": "82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1", - "index": 2, - "init_at": "2016-05-13T07:02:04Z", - "metadata": {}, - 
"name": "node-e395be1e-002", - "physical_id": "66a81d68-bf48-4af5-897b-a3bfef7279a8", - "profile_id": "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", - "profile_name": "pcirros", - "project_id": "eee0b7c083e84501bdd50fb269d2a10e", - "role": "", - "status": "ACTIVE", - "status_reason": "Creation succeeded", - "updated_at": null, - "user": "ab79b9647d074e46ac223a8fa297b846" - } - ] -} diff --git a/api-ref/source/samples/node-operation-request.json b/api-ref/source/samples/node-operation-request.json deleted file mode 100644 index 86d866af3..000000000 --- a/api-ref/source/samples/node-operation-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "reboot": { - "type": "SOFT" - } -} diff --git a/api-ref/source/samples/node-recover-request.json b/api-ref/source/samples/node-recover-request.json deleted file mode 100644 index 27d6e43a3..000000000 --- a/api-ref/source/samples/node-recover-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "recover": { - "operation": "reboot", - "operation_params": { - "type": "soft" - }, - "check": false - } -} diff --git a/api-ref/source/samples/node-show-response.json b/api-ref/source/samples/node-show-response.json deleted file mode 100644 index b513ce6cc..000000000 --- a/api-ref/source/samples/node-show-response.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "node": { - "cluster_id": null, - "created_at": "2015-02-10T12:03:16Z", - "data": {}, - "dependents": {}, - "domain": null, - "id": "d5779bb0-f0a0-49c9-88cc-6f078adb5a0b", - "index": -1, - "init_at": "2015-02-10T12:03:13", - "metadata": {}, - "name": "node1", - "physical_id": "f41537fa-22ab-4bea-94c0-c874e19d0c80", - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "role": null, - "status": "ACTIVE", - "status_reason": "Creation succeeded", - "updated_at": "2015-03-04T04:58:27Z", - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/node-update-request.json b/api-ref/source/samples/node-update-request.json deleted file mode 100644 index b593bb7f8..000000000 --- a/api-ref/source/samples/node-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "node": { - "name": "new_node_name" - } -} diff --git a/api-ref/source/samples/policy-create-request.json b/api-ref/source/samples/policy-create-request.json deleted file mode 100644 index d610eedec..000000000 --- a/api-ref/source/samples/policy-create-request.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "policy": { - "name": "sp001", - "spec": { - "properties": { - "adjustment": { - "min_step": 1, - "number": 1, - "type": "CHANGE_IN_CAPACITY" - }, - "event": "CLUSTER_SCALE_IN" - }, - "type": "senlin.policy.scaling", - "version": "1.0" - } - } -} diff --git a/api-ref/source/samples/policy-create-response.json b/api-ref/source/samples/policy-create-response.json deleted file mode 100644 index 4d9f6fcaf..000000000 --- a/api-ref/source/samples/policy-create-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "policy": { - "created_at": "2015-03-02T07:40:31", - "data": {}, - "domain": null, - "id": "02f62195-2198-4797-b0a9-877632208527", - "name": "sp001", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "adjustment": { - "best_effort": true, - "min_step": 1, - "number": 1, - "type": "CHANGE_IN_CAPACITY" - }, - "event": "CLUSTER_SCALE_IN" - }, - "type": "senlin.policy.scaling", - "version": "1.0" - }, - "type": "senlin.policy.scaling-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git 
a/api-ref/source/samples/policy-list-response.json b/api-ref/source/samples/policy-list-response.json deleted file mode 100644 index 6e87c214d..000000000 --- a/api-ref/source/samples/policy-list-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "policies": [ - { - "created_at": "2015-02-15T08:33:13.000000", - "data": {}, - "domain": null, - "id": "7192d8df-73be-4e98-ab99-1cf6d5066729", - "name": "test_policy_1", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "description": "A test policy", - "properties": { - "criteria": "OLDEST_FIRST", - "destroy_after_deletion": true, - "grace_period": 60, - "reduce_desired_capacity": false - }, - "type": "senlin.policy.deletion", - "version": "1.0" - }, - "type": "senlin.policy.deletion-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } - ] -} diff --git a/api-ref/source/samples/policy-show-response.json b/api-ref/source/samples/policy-show-response.json deleted file mode 100644 index 4d9f6fcaf..000000000 --- a/api-ref/source/samples/policy-show-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "policy": { - "created_at": "2015-03-02T07:40:31", - "data": {}, - "domain": null, - "id": "02f62195-2198-4797-b0a9-877632208527", - "name": "sp001", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "adjustment": { - "best_effort": true, - "min_step": 1, - "number": 1, - "type": "CHANGE_IN_CAPACITY" - }, - "event": "CLUSTER_SCALE_IN" - }, - "type": "senlin.policy.scaling", - "version": "1.0" - }, - "type": "senlin.policy.scaling-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/policy-type-show-response-v1.5.json b/api-ref/source/samples/policy-type-show-response-v1.5.json deleted file mode 100644 index 8b9563eef..000000000 --- a/api-ref/source/samples/policy-type-show-response-v1.5.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "policy_type": { - "name": "senlin.policy.affinity-1.0", - "schema": { - "availability_zone": { - "description": "Name of the availability zone to place the nodes.", - "required": false, - "type": "String", - "updatable": false - }, - "enable_drs_extension": { - "default": false, - "description": "Enable vSphere DRS extension.", - "required": false, - "type": "Boolean", - "updatable": false - }, - "servergroup": { - "description": "Properties of the VM server group", - "required": false, - "schema": { - "name": { - "description": "The name of the server group", - "required": false, - "type": "String", - "updatable": false - }, - "policies": { - "constraints": [ - { - "constraint": [ - "affinity", - "anti-affinity" - ], - "type": "AllowedValues" - } - ], - "default": "anti-affinity", - "description": "The server group policies.", - "required": false, - "type": "String", - "updatable": false - } - }, - "type": "Map", - "updatable": false - } - }, - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.10" - } - ] - } - } -} diff --git a/api-ref/source/samples/policy-type-show-response.json b/api-ref/source/samples/policy-type-show-response.json deleted file mode 100644 index bec3e6152..000000000 --- a/api-ref/source/samples/policy-type-show-response.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "policy_type": { - "name": "senlin.policy.affinity-1.0", - "schema": { - "availability_zone": { - "description": "Name of the availability zone to place the nodes.", - "required": false, - "type": "String", - "updatable": false - }, - "enable_drs_extension": { - "default": false, - "description": 
"Enable vSphere DRS extension.", - "required": false, - "type": "Boolean", - "updatable": false - }, - "servergroup": { - "description": "Properties of the VM server group", - "required": false, - "schema": { - "name": { - "description": "The name of the server group", - "required": false, - "type": "String", - "updatable": false - }, - "policies": { - "constraints": [ - { - "constraint": [ - "affinity", - "anti-affinity" - ], - "type": "AllowedValues" - } - ], - "default": "anti-affinity", - "description": "The server group policies.", - "required": false, - "type": "String", - "updatable": false - } - }, - "type": "Map", - "updatable": false - } - } - } -} diff --git a/api-ref/source/samples/policy-types-list-response-v1.5.json b/api-ref/source/samples/policy-types-list-response-v1.5.json deleted file mode 100644 index 879e688ec..000000000 --- a/api-ref/source/samples/policy-types-list-response-v1.5.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "policy_types": [ - { - "name": "senlin.policy.affinity", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.10" - } - ] - } - }, - { - "name": "senlin.policy.health", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "EXPERIMENTAL", - "since": "2016.10" - } - ] - } - }, - { - "name": "senlin.policy.scaling", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.04" - } - ] - } - }, - { - "name": "senlin.policy.region_placement", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "EXPERIMENTAL", - "since": "2016.04" - }, - { - "status": "SUPPORTED", - "since": "2016.10" - } - ] - } - } - ] -} diff --git a/api-ref/source/samples/policy-types-list-response.json b/api-ref/source/samples/policy-types-list-response.json deleted file mode 100644 index 6914ce935..000000000 --- a/api-ref/source/samples/policy-types-list-response.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "policy_types": [ - { - "name": "senlin.policy.affinity-1.0" - }, - { - "name": "senlin.policy.batch-1.0" - }, - { - "name": "senlin.policy.health-1.0" - }, - { - "name": "senlin.policy.scaling-1.0" - }, - { - "name": "senlin.policy.region_placement-1.0" - }, - { - "name": "senlin.policy.deletion-1.0" - }, - { - "name": "senlin.policy.loadbalance-1.1" - }, - { - "name": "senlin.policy.zone_placement-1.0" - } - ] -} diff --git a/api-ref/source/samples/policy-update-request.json b/api-ref/source/samples/policy-update-request.json deleted file mode 100644 index c1c489a2a..000000000 --- a/api-ref/source/samples/policy-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "policy": { - "name": "new_name" - } -} diff --git a/api-ref/source/samples/policy-update-response.json b/api-ref/source/samples/policy-update-response.json deleted file mode 100644 index 98f53d70b..000000000 --- a/api-ref/source/samples/policy-update-response.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "policy": { - "created_at": "2015-10-14T09:14:53", - "data": {}, - "domain": null, - "id": "ac5415bd-f522-4160-8be0-f8853e4bc332", - "name": "dp01", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "description": "A policy for node deletion.", - "properties": { - "criteria": "OLDEST_FIRST", - "destroy_after_deletion": true, - "grace_period": 60, - "reduce_desired_capacity": false - }, - "type": "senlin.policy.deletion", - "version": "1.0" - }, - "type": "senlin.policy.deletion-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git 
a/api-ref/source/samples/policy-validate-request.json b/api-ref/source/samples/policy-validate-request.json deleted file mode 100644 index bc17f02bd..000000000 --- a/api-ref/source/samples/policy-validate-request.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "policy": { - "spec": { - "properties": { - "adjustment": { - "min_step": 1, - "number": 1, - "type": "CHANGE_IN_CAPACITY" - }, - "event": "CLUSTER_SCALE_IN" - }, - "type": "senlin.policy.scaling", - "version": "1.0" - } - } -} diff --git a/api-ref/source/samples/policy-validate-response.json b/api-ref/source/samples/policy-validate-response.json deleted file mode 100644 index f888649b5..000000000 --- a/api-ref/source/samples/policy-validate-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "policy": { - "created_at": null, - "data": {}, - "domain": null, - "id": null, - "name": "validated_policy", - "project": "1d567ed4ef51453a85545f018b68c26d", - "spec": { - "properties": { - "adjustment": { - "min_step": 1, - "number": 1, - "type": "CHANGE_IN_CAPACITY" - }, - "event": "CLUSTER_SCALE_IN" - }, - "type": "senlin.policy.scaling", - "version": "1.0" - }, - "type": "senlin.policy.scaling-1.0", - "updated_at": null, - "user": "990e4c1f4a414f74990b17d16f2540b5" - } -} diff --git a/api-ref/source/samples/profile-create-request.json b/api-ref/source/samples/profile-create-request.json deleted file mode 100644 index 705790b33..000000000 --- a/api-ref/source/samples/profile-create-request.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "profile": { - "metadata": {}, - "name": "test-profile", - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ], - "security_groups": [ - "default" - ] - }, - "type": "os.nova.server", - "version": 1.0 - } - } -} diff --git a/api-ref/source/samples/profile-create-response.json b/api-ref/source/samples/profile-create-response.json deleted file mode 100644 index 527207ccf..000000000 --- a/api-ref/source/samples/profile-create-response.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "profile": { - "created_at": "2016-05-05T10:15:22Z", - "domain": null, - "id": "1d85fc39-7d9a-4f64-9751-b127ef554923", - "metadata": {}, - "name": "test-profile", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ], - "security_groups": [ - "default" - ] - }, - "type": "os.nova.server", - "version": 1.0 - }, - "type": "os.nova.server-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/profile-list-response.json b/api-ref/source/samples/profile-list-response.json deleted file mode 100644 index e302c880d..000000000 --- a/api-ref/source/samples/profile-list-response.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "profiles": [ - { - "created_at": "2016-01-03T16:22:23Z", - "domain": null, - "id": "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", - "metadata": {}, - "name": "pserver", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "flavor": 1, - "image": "cirros-0.3.4-x86_64-uec", - "key_name": "oskey", - "name": "cirros_server", - "networks": [ - { - "network": "private" - } - ] - }, - "type": "os.nova.server", - "version": 1.0 - }, - "type": "os.nova.server-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } - ] -} diff --git a/api-ref/source/samples/profile-show-response.json 
b/api-ref/source/samples/profile-show-response.json deleted file mode 100644 index eb6e185d4..000000000 --- a/api-ref/source/samples/profile-show-response.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "profile": { - "created_at": "2016-03-10T06:34:56Z", - "domain": null, - "id": "17151d8a-f46f-4541-bde0-db3b207c20d2", - "metadata": {}, - "name": "PF20", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ] - }, - "type": "os.nova.server", - "version": 1.0 - }, - "type": "os.nova.server-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/profile-type-ops-response.json b/api-ref/source/samples/profile-type-ops-response.json deleted file mode 100644 index 86846e0ed..000000000 --- a/api-ref/source/samples/profile-type-ops-response.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "operations": { - "reboot": { - "description": "Reboot the nova server.", - "parameters": { - "type": { - "constraints": [ - { - "constraint": [ - "SOFT", - "HARD" - ], - "type": "AllowedValues" - } - ], - "default": "SOFT", - "description": "Type of reboot which can be 'SOFT' or 'HARD'.", - "required": false, - "type": "String" - } - } - } - } -} diff --git a/api-ref/source/samples/profile-type-show-response-v1.5.json b/api-ref/source/samples/profile-type-show-response-v1.5.json deleted file mode 100644 index 720b7e62c..000000000 --- a/api-ref/source/samples/profile-type-show-response-v1.5.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "profile_type": { - "name": "os.heat.stack-1.0", - "schema": { - "context": { - "default": {}, - "description": "A dictionary for specifying the customized context for stack operations", - "required": false, - "type": "Map", - "updatable": false - }, - "disable_rollback": { - "default": true, - "description": "A boolean specifying whether a stack operation can be rolled back.", - "required": false, - "type": "Boolean", - "updatable": true - }, - "environment": { - "default": {}, - "description": "A map that specifies the environment used for stack operations.", - "required": false, - "type": "Map", - "updatable": true - }, - "files": { - "default": {}, - "description": "Contents of files referenced by the template, if any.", - "required": false, - "type": "Map", - "updatable": true - }, - "parameters": { - "default": {}, - "description": "Parameters to be passed to Heat for stack operations.", - "required": false, - "type": "Map", - "updatable": true - }, - "template": { - "default": {}, - "description": "Heat stack template.", - "required": false, - "type": "Map", - "updatable": true - }, - "template_url": { - "default": "", - "description": "Heat stack template url.", - "required": false, - "type": "String", - "updatable": true - }, - "timeout": { - "description": "A integer that specifies the number of minutes that a stack operation times out.", - "required": false, - "type": "Integer", - "updatable": true - } - }, - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.04" - } - ] - } - } -} diff --git a/api-ref/source/samples/profile-type-show-response.json b/api-ref/source/samples/profile-type-show-response.json deleted file mode 100644 index 90d2d514b..000000000 --- a/api-ref/source/samples/profile-type-show-response.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "profile_type": { - "name": "os.heat.stack-1.0", - "schema": { - "context": { - "default": {}, 
- "description": "A dictionary for specifying the customized context for stack operations", - "required": false, - "type": "Map", - "updatable": false - }, - "disable_rollback": { - "default": true, - "description": "A boolean specifying whether a stack operation can be rolled back.", - "required": false, - "type": "Boolean", - "updatable": true - }, - "environment": { - "default": {}, - "description": "A map that specifies the environment used for stack operations.", - "required": false, - "type": "Map", - "updatable": true - }, - "files": { - "default": {}, - "description": "Contents of files referenced by the template, if any.", - "required": false, - "type": "Map", - "updatable": true - }, - "parameters": { - "default": {}, - "description": "Parameters to be passed to Heat for stack operations.", - "required": false, - "type": "Map", - "updatable": true - }, - "template": { - "default": {}, - "description": "Heat stack template.", - "required": false, - "type": "Map", - "updatable": true - }, - "template_url": { - "default": "", - "description": "Heat stack template url.", - "required": false, - "type": "String", - "updatable": true - }, - "timeout": { - "description": "A integer that specifies the number of minutes that a stack operation times out.", - "required": false, - "type": "Integer", - "updatable": true - } - } - } -} diff --git a/api-ref/source/samples/profile-types-list-response-v1.5.json b/api-ref/source/samples/profile-types-list-response-v1.5.json deleted file mode 100644 index 5d51596f5..000000000 --- a/api-ref/source/samples/profile-types-list-response-v1.5.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "profile_types": [ - { - "name": "container.dockerinc.docker", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "EXPERIMENTAL", - "since": "2017.02" - } - ] - } - }, - { - "name": "os.heat.stack", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.04" - } - ] - } - }, - { - "name": "os.nova.server", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.04" - } - ] - } - } - ] -} diff --git a/api-ref/source/samples/profile-types-list-response.json b/api-ref/source/samples/profile-types-list-response.json deleted file mode 100644 index fcf5cf743..000000000 --- a/api-ref/source/samples/profile-types-list-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "profile_types": [ - { - "name": "container.dockerinc.docker-1.0" - }, - { - "name": "os.heat.stack-1.0" - }, - { - "name": "os.nova.server-1.0" - } - ] -} diff --git a/api-ref/source/samples/profile-update-request.json b/api-ref/source/samples/profile-update-request.json deleted file mode 100644 index 5ae1fb158..000000000 --- a/api-ref/source/samples/profile-update-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "profile": { - "metadata": {"key": "value"}, - "name": "new-name" - } -} diff --git a/api-ref/source/samples/profile-update-response.json b/api-ref/source/samples/profile-update-response.json deleted file mode 100644 index 17a5e1502..000000000 --- a/api-ref/source/samples/profile-update-response.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "profile": { - "created_at": "2016-03-10T06:34:56Z", - "domain": null, - "id": "17151d8a-f46f-4541-bde0-db3b207c20d2", - "metadata": { - "key": "value" - }, - "name": "new-name", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": 
"private" - } - ] - }, - "type": "os.nova.server", - "version": 1.0 - }, - "type": "os.nova.server-1.0", - "updated_at": "2016-03-11T05:10:11Z", - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/profile-validate-request.json b/api-ref/source/samples/profile-validate-request.json deleted file mode 100644 index f99d9f7b2..000000000 --- a/api-ref/source/samples/profile-validate-request.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "profile": { - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ], - "security_groups": [ - "default" - ] - }, - "type": "os.nova.server", - "version": 1.0 - } - } -} diff --git a/api-ref/source/samples/profile-validate-response.json b/api-ref/source/samples/profile-validate-response.json deleted file mode 100644 index 32042ab06..000000000 --- a/api-ref/source/samples/profile-validate-response.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "profile": { - "created_at": null, - "domain": null, - "id": null, - "metadata": null, - "name": "validated_profile", - "project": "1d567ed4ef51453a85545f018b68c26d", - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ], - "security_groups": [ - "default" - ] - }, - "type": "os.nova.server", - "version": 1.0 - }, - "type": "os.nova.server-1.0", - "updated_at": null, - "user": "990e4c1f4a414f74990b17d16f2540b5" - } -} diff --git a/api-ref/source/samples/receiver-create-request.json b/api-ref/source/samples/receiver-create-request.json deleted file mode 100644 index c78b95b9c..000000000 --- a/api-ref/source/samples/receiver-create-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "receiver": { - "action": "CLUSTER_SCALE_OUT", - "cluster_id": "cf99d754-3cdc-47f4-8a29-cd14f02f5436", - "name": "cluster_inflate", - "params": { - "count": "1" - }, - "type": "webhook" - } -} diff --git a/api-ref/source/samples/receiver-create-response.json b/api-ref/source/samples/receiver-create-response.json deleted file mode 100644 index 8c5747908..000000000 --- a/api-ref/source/samples/receiver-create-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "receiver": { - "action": "CLUSTER_SCALE_OUT", - "actor": { - "trust_id": [ - "6dc6d336e3fc4c0a951b5698cd1236d9" - ] - }, - "channel": { - "alarm_url": "http://node1:8777/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=2&count=1" - }, - "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", - "created_at": "2015-06-27T05:09:43", - "domain": "Default", - "id": "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", - "name": "cluster_inflate", - "params": { - "count": "1" - }, - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "type": "webhook", - "updated_at": null, - "user": "b4ad2d6e18cc2b9c48049f6dbe8a5b3c" - } -} diff --git a/api-ref/source/samples/receiver-show-response.json b/api-ref/source/samples/receiver-show-response.json deleted file mode 100644 index 8c5747908..000000000 --- a/api-ref/source/samples/receiver-show-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "receiver": { - "action": "CLUSTER_SCALE_OUT", - "actor": { - "trust_id": [ - "6dc6d336e3fc4c0a951b5698cd1236d9" - ] - }, - "channel": { - "alarm_url": "http://node1:8777/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=2&count=1" - }, - "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", - "created_at": "2015-06-27T05:09:43", - "domain": "Default", - "id": 
"573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", - "name": "cluster_inflate", - "params": { - "count": "1" - }, - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "type": "webhook", - "updated_at": null, - "user": "b4ad2d6e18cc2b9c48049f6dbe8a5b3c" - } -} diff --git a/api-ref/source/samples/receiver-update-request.json b/api-ref/source/samples/receiver-update-request.json deleted file mode 100644 index 54fcb76ea..000000000 --- a/api-ref/source/samples/receiver-update-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "receiver": { - "name": "new-name", - "action": "CLUSTER_SCALE_OUT", - "params": { - "count": "2" - } - } -} diff --git a/api-ref/source/samples/receiver-update-response.json b/api-ref/source/samples/receiver-update-response.json deleted file mode 100644 index 1851e68b5..000000000 --- a/api-ref/source/samples/receiver-update-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "receiver": { - "action": "CLUSTER_SCALE_OUT", - "actor": { - "trust_id": [ - "6dc6d336e3fc4c0a951b5698cd1236d9" - ] - }, - "channel": { - "alarm_url": "http://node1:8777/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=2&count=2" - }, - "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", - "created_at": "2015-06-27T05:09:43", - "domain": "Default", - "id": "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", - "name": "new-name", - "params": { - "count": "2" - }, - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "type": "webhook", - "updated_at": "2016-03-11T05:10:11", - "user": "b4ad2d6e18cc2b9c48049f6dbe8a5b3c" - } -} diff --git a/api-ref/source/samples/receivers-list-response.json b/api-ref/source/samples/receivers-list-response.json deleted file mode 100644 index d65547c4c..000000000 --- a/api-ref/source/samples/receivers-list-response.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "receivers": [ - { - "action": "CLUSTER_SCALE_OUT", - "actor": { - "trust_id": [ - "6dc6d336e3fc4c0a951b5698cd1236d9" - ] - }, - "channel": { - "alarm_url": "http://node1:8777/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=2&count=1" - }, - "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", - "created_at": "2015-06-27T05:09:43", - "domain": "Default", - "id": "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", - "name": "cluster_inflate", - "params": { - "count": "1" - }, - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "type": "webhook", - "updated_at": null, - "user": "b4ad2d6e18cc2b9c48049f6dbe8a5b3c" - } - ] -} diff --git a/api-ref/source/samples/services-list-response.json b/api-ref/source/samples/services-list-response.json deleted file mode 100644 index b501fdccf..000000000 --- a/api-ref/source/samples/services-list-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "services": [ - { - "binary": "senlin-engine", - "disabled_reason": null, - "host": "host1", - "id": "f93f83f6-762b-41b6-b757-80507834d394", - "state": "up", - "status": "enabled", - "topic": "senlin-engine", - "updated_at": "2017-04-24T07:43:12" - } - ] -} diff --git a/api-ref/source/samples/version-show-response.json b/api-ref/source/samples/version-show-response.json deleted file mode 100644 index 8fa260569..000000000 --- a/api-ref/source/samples/version-show-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "version": { - "id": "1.0", - "links": [ - { - "href": "/v1/", - "rel": "self" - }, - { - "href": "https://docs.openstack.org/api-ref/clustering", - "rel": "help" - } - ], - "max_version": "1.7", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.clustering-v1+json" - } - ], - "min_version": "1.0", - "status": "CURRENT", - 
"updated": "2016-01-18T00:00:00Z" - } -} diff --git a/api-ref/source/samples/versions-list-response.json b/api-ref/source/samples/versions-list-response.json deleted file mode 100644 index 12a4ca7bb..000000000 --- a/api-ref/source/samples/versions-list-response.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "versions": [ - { - "id": "1.0", - "links": [ - { - "href": "/v1/", - "rel": "self" - }, - { - "href": "https://docs.openstack.org/api-ref/clustering", - "rel": "help" - } - ], - "max_version": "1.7", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.clustering-v1+json" - } - ], - "min_version": "1.0", - "status": "CURRENT", - "updated": "2016-01-18T00:00:00Z" - } - ] -} diff --git a/api-ref/source/samples/webhook-action-response.json b/api-ref/source/samples/webhook-action-response.json deleted file mode 100644 index 5fb7bfa86..000000000 --- a/api-ref/source/samples/webhook-action-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "action": "290c44fa-c60f-4d75-a0eb-87433ba982a3" -} \ No newline at end of file diff --git a/api-ref/source/services.inc b/api-ref/source/services.inc deleted file mode 100644 index 2c338e18a..000000000 --- a/api-ref/source/services.inc +++ /dev/null @@ -1,60 +0,0 @@ -=================== -Services (services) -=================== - -Lists all services for senlin engine. - - -List services -=================== - -.. rest_method:: GET /v1/services - - min_version: 1.7 - -This API is only available since API microversion 1.7. - -Lists all services. - -Response codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - services: services - - binary: binary - - disabled_reason: disabled_reason - - host: host - - id: service_id - - state: service_state - - status: service_status - - topic: topic - - updated_at: updated_at - -Response Example ----------------- - -.. literalinclude:: samples/services-list-response.json - :language: javascript diff --git a/api-ref/source/status.yaml b/api-ref/source/status.yaml deleted file mode 100644 index 21a08799f..000000000 --- a/api-ref/source/status.yaml +++ /dev/null @@ -1,61 +0,0 @@ -################# -# Success Codes # -################# -200: - default: | - Request was successful. -201: - default: | - Resource was created and is ready to use. -202: - default: | - Request was accepted for processing, but the processing has not been - completed. A 'location' header is included in the response which contains - a link to check the progress of the request. -204: - default: | - The server has fulfilled the request by deleting the resource. -300: - default: | - There are multiple choices for resources. The request has to be more - specific to successfully retrieve one of these resources. - multi_version: | - There is more than one API version for choice. The client has to be more - specific to request a service endpoint. - -################# -# Error Codes # -################# - -400: - default: | - Some content in the request was invalid. -401: - default: | - User must authenticate before making a request. -403: - default: | - Policy does not allow current user to do this operation. -404: - default: | - The requested resource could not be found. 
-405: - default: | - Method is not valid for this endpoint. -406: - default: | - The requested API version is not supported by the API. -409: - default: | - This operation conflicted with another operation on this resource. - duplicate_zone: | - There is already a zone with this name. -500: - default: | - Something went wrong inside the service. This should not normally happen. - If it does happen, it means the server has experienced some serious - problems. -503: - default: | - Service is not available. This is mostly caused by service configuration - errors which prevent the service from starting up successfully. diff --git a/api-ref/source/versions.inc b/api-ref/source/versions.inc deleted file mode 100644 index 552a03651..000000000 --- a/api-ref/source/versions.inc +++ /dev/null @@ -1,101 +0,0 @@ -============ -API Versions -============ - -Concepts -======== - -The Senlin API supports ''major versions'' expressed in request URLs and -''microversions'' which can be sent in the HTTP header ``OpenStack-API-Version``. - -When the specified ``OpenStack-API-Version`` is not supported by the API -service, a 406 (NotAcceptable) exception will be raised. Note that this applies -to all API requests documented in this guide. - -List Major Versions -=================== - -.. rest_method:: GET / - -Lists information for all Clustering API major versions. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 300: multi_version - -.. rest_status_code:: error status.yaml - - - 503 - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - versions: versions - - id: version_id - - links: version_links - - max_version: version_max_version - - media-types: version_media_types - - min_version: version_min_version - - status: version_status - - updated: version_updated - -Response Example ----------------- - -.. literalinclude:: samples/versions-list-response.json - :language: javascript - - -Show Details of an API Version ============================== - -.. rest_method:: GET /{version}/ - -Shows details about an API major version. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 404 - - 406 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - version: version_url - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - version: version - - id: version_id - - links: version_links - - max_version: version_max_version - - media-types: version_media_types - - min_version: version_min_version - - status: version_status - - updated: version_updated - -Response Example ----------------- - -.. literalinclude:: samples/version-show-response.json - :language: javascript diff --git a/api-ref/source/webhooks.inc b/api-ref/source/webhooks.inc deleted file mode 100644 index 06c0242a2..000000000 --- a/api-ref/source/webhooks.inc +++ /dev/null @@ -1,55 +0,0 @@ -=================== -Webhooks (webhooks) -=================== - -Triggers an action represented by a webhook. For API microversions less than -1.10, optional params in the query string are sent as inputs to the -targeted action. For API microversions equal to or greater than 1.10, any -key-value pairs in the request body are sent as inputs to the -targeted action. - -Trigger webhook action ====================== - -.. 
rest_method:: POST /v1/webhooks/{webhook_id}/trigger - -Triggers a webhook receiver. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 403 - - 404 - - 503 - -Request Parameters ------------------ - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - webhook_id: webhook_id_url - - V: webhook_version - - params: webhook_params - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - Location: location - - action: action_action - -Response Example ---------------- - -.. literalinclude:: samples/webhook-action-response.json - :language: javascript diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 5774e51c1..000000000 --- a/bindep.txt +++ /dev/null @@ -1,2 +0,0 @@ -graphviz [!platform:gentoo] -media-gfx/graphviz [platform:gentoo] diff --git a/contrib/kubernetes/README.rst b/contrib/kubernetes/README.rst deleted file mode 100644 index 1e40c58d9..000000000 --- a/contrib/kubernetes/README.rst +++ /dev/null @@ -1,99 +0,0 @@ -kubernetes Profile -================== - -Installation ------------- - -:: - - pip install --editable . - - -Usage ----- - -Prepare a profile for master nodes -.................................. - -Copy the example profile file `examples/kubemaster.yaml` and modify the -related parameters to match your OpenStack environment. -For now, only the official Ubuntu 16.04 cloud image is supported. - -:: - - openstack cluster profile create --spec-file kubemaster.yaml profile-master - -Create a cluster for master nodes -................................. - -For now, please create exactly one node in this cluster. This profile does not -yet support a multi-master, high-availability installation. - -:: - - openstack cluster create --min-size 1 --desired-capacity 1 --max-size 1 --profile profile-master cm - - -Prepare a profile for worker nodes -.................................. - -Copy the example profile file `kubenode.yaml`, modify the related parameters, -and set `master_cluster` to the senlin cluster you just created. - -:: - - openstack cluster profile create --spec-file kubenode.yaml profile-node - - -Create a cluster for worker nodes -................................. - -:: - - openstack cluster create --desired-capacity 2 --profile profile-node cn - - - -Operate kubernetes ------------------- - -About kubeconfig -................ - -The config file for `kubectl` is located at `/root/.kube/config` on the -master node. Copy this file out and place it at `$HOME/.kube/config`, then -change the server IP in it to the master node's floating IP. Run -`kubectl get nodes` to verify that it works. - -Dashboard -......... - -Prepare the following file to skip dashboard authentication:: - - $ cat ./dashboard-admin.yaml - apiVersion: rbac.authorization.k8s.io/v1beta1 - kind: ClusterRoleBinding - metadata: - name: kubernetes-dashboard - labels: - k8s-app: kubernetes-dashboard - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - -Apply this config:: - - kubectl apply -f ./dashboard-admin.yaml - -Start a proxy using `kubectl`:: - - kubectl proxy - -Open the dashboard in a browser at -`http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/` -and skip the login step. 
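A note tying this back to the webhook trigger API in ``webhooks.inc`` above: before microversion 1.10 the action inputs go in the query string, and from 1.10 onward they go in the request body. Below is a minimal sketch of a trigger call using python-``requests``. The endpoint URL and token are hypothetical placeholders, the webhook ID is copied from the sample files, and the ``clustering 1.10`` microversion header value is an assumption based on the conventions documented above; adjust all of them to your deployment::

    # Minimal sketch: trigger a Senlin webhook receiver.
    # SENLIN, TOKEN and WEBHOOK are placeholder values, not real defaults.
    import requests

    SENLIN = "http://senlin.example.com:8777/v1"      # hypothetical endpoint
    WEBHOOK = "e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422"  # ID from the samples above
    TOKEN = "<a valid Keystone token>"

    resp = requests.post(
        "%s/webhooks/%s/trigger" % (SENLIN, WEBHOOK),
        headers={
            "X-Auth-Token": TOKEN,
            # From microversion 1.10 on, inputs travel in the request body.
            "OpenStack-API-Version": "clustering 1.10",
        },
        json={"count": 2},  # forwarded as inputs to the targeted action
    )
    resp.raise_for_status()
    # A 202 response carries the UUID of the triggered action, as in
    # samples/webhook-action-response.json.
    print(resp.json()["action"])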
diff --git a/contrib/kubernetes/TODO.rst b/contrib/kubernetes/TODO.rst deleted file mode 100644 index 97a11efdc..000000000 --- a/contrib/kubernetes/TODO.rst +++ /dev/null @@ -1,13 +0,0 @@ -TODO: -- Forbid deleting the master cluster before its node cluster is deleted. -- Limit the master cluster to no more than one node. -- Drain a worker node before deleting it. -- More validation before cluster creation. -- More exception handling in the code. - -Done: - -- Add the ability to run actions on cluster creation/deletion. -- Add more network interfaces in drivers. -- Add a kubernetes master profile that uses kubeadm to set up one master node. -- Add a kubernetes node profile that automatically retrieves kubernetes data from the master cluster. diff --git a/contrib/kubernetes/examples/kubemaster.yaml b/contrib/kubernetes/examples/kubemaster.yaml deleted file mode 100644 index c28c10570..000000000 --- a/contrib/kubernetes/examples/kubemaster.yaml +++ /dev/null @@ -1,7 +0,0 @@ -type: senlin.kubernetes.master -version: 1.0 -properties: - flavor: k8s.master - image: ubuntu-16.04 - key_name: elynn - public_network: public diff --git a/contrib/kubernetes/examples/kubenode.yaml b/contrib/kubernetes/examples/kubenode.yaml deleted file mode 100644 index 59a9b55db..000000000 --- a/contrib/kubernetes/examples/kubenode.yaml +++ /dev/null @@ -1,7 +0,0 @@ -type: senlin.kubernetes.worker -version: 1.0 -properties: - flavor: k8s.worker - image: ubuntu-16.04 - key_name: elynn - master_cluster: cm diff --git a/contrib/kubernetes/kube/__init__.py b/contrib/kubernetes/kube/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/contrib/kubernetes/kube/base.py b/contrib/kubernetes/kube/base.py deleted file mode 100644 index 5f2bde37c..000000000 --- a/contrib/kubernetes/kube/base.py +++ /dev/null @@ -1,275 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import random -import string - -from oslo_log import log as logging - -from senlin.common import context -from senlin.common import exception as exc -from senlin.objects import cluster as cluster_obj -from senlin.profiles.os.nova import server - -LOG = logging.getLogger(__name__) - - -def GenKubeToken(): - token_id = ''.join([random.choice( - string.digits + string.ascii_lowercase) for i in range(6)]) - token_secret = ''.join([random.choice( - string.digits + string.ascii_lowercase) for i in range(16)]) - token = '.'.join([token_id, token_secret]) - return token - - -def loadScript(path): - script_file = os.path.join(os.path.dirname(__file__), path) - with open(script_file, "r") as f: - content = f.read() - return content - - -class KubeBaseProfile(server.ServerProfile): - """Kubernetes Base Profile.""" - - def __init__(self, type_name, name, **kwargs): - super(KubeBaseProfile, self).__init__(type_name, name, **kwargs) - self.server_id = None - - def _generate_kubeadm_token(self, obj): - token = GenKubeToken() - # store generated token - - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - data = obj.data - data[self.KUBEADM_TOKEN] = token - cluster_obj.Cluster.update(ctx, obj.id, {'data': data}) - return token - - def _get_kubeadm_token(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - return cluster.data.get(self.KUBEADM_TOKEN) - return None - - def _update_master_ip(self, obj, ip): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - cluster.data['kube_master_ip'] = ip - cluster.update(ctx, obj.cluster_id, {'data': cluster.data}) - - def _create_network(self, obj): - client = self.network(obj) - try: - net = client.network_create() - subnet = client.subnet_create(network_id=net.id, - cidr='10.7.0.0/24', - ip_version=4) - except exc.InternalError as ex: - raise exc.EResourceCreation(type='kubernetes', - message=str(ex), - resource_id=obj.id) - pub_net = client.network_get(self.properties[self.PUBLIC_NETWORK]) - try: - router = client.router_create( - external_gateway_info={"network_id": pub_net.id}) - client.add_interface_to_router(router, subnet_id=subnet.id) - fip = client.floatingip_create(floating_network_id=pub_net.id) - except exc.InternalError as ex: - raise exc.EResourceCreation(type='kubernetes', - message=str(ex), - resource_id=obj.id) - - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - data = obj.data - data[self.PRIVATE_NETWORK] = net.id - data[self.PRIVATE_SUBNET] = subnet.id - data[self.PRIVATE_ROUTER] = router.id - data[self.KUBE_MASTER_FLOATINGIP] = fip.floating_ip_address - data[self.KUBE_MASTER_FLOATINGIP_ID] = fip.id - - cluster_obj.Cluster.update(ctx, obj.id, {'data': data}) - - return net.id - - def _delete_network(self, obj): - client = self.network(obj) - fip_id = obj.data.get(self.KUBE_MASTER_FLOATINGIP_ID) - if fip_id: - try: - # delete floating ip - client.floatingip_delete(fip_id) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='kubernetes', id=fip_id, - message=str(ex)) - - router = obj.data.get(self.PRIVATE_ROUTER) - subnet = obj.data.get(self.PRIVATE_SUBNET) - if router and subnet: - try: - client.remove_interface_from_router(router, subnet_id=subnet) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='kubernetes', - 
id=subnet, - message=str(ex)) - - if router: - try: - # delete router - client.router_delete(router, ignore_missing=True) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='kubernetes', - id=router, - message=str(ex)) - - net = obj.data.get(self.PRIVATE_NETWORK) - if net: - try: - # delete network - client.network_delete(net, ignore_missing=True) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='kubernetes', - id=net, - message=str(ex)) - - def _associate_floatingip(self, obj, server): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - fip = cluster.data.get(self.KUBE_MASTER_FLOATINGIP) - if fip: - try: - self.compute(obj).server_floatingip_associate(server, - fip) - except exc.InternalError as ex: - raise exc.EResourceOperation(op='floatingip', - type='kubernetes', - id=fip, - message=str(ex)) - - def _disassociate_floatingip(self, obj, server): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - fip = cluster.data.get(self.KUBE_MASTER_FLOATINGIP) - if fip: - try: - self.compute(obj).server_floatingip_disassociate(server, - fip) - except exc.InternalError as ex: - raise exc.EResourceOperation(op='floatingip', - type='kubernetes', - id=fip, - message=str(ex)) - - def _get_cluster_data(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - return cluster.data - return {} - - def _get_network(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - return cluster.data.get(self.PRIVATE_NETWORK) - return None - - def _create_security_group(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - sgid = obj.data.get(self.SECURITY_GROUP, None) - if sgid: - return sgid - - client = self.network(obj) - try: - sg = client.security_group_create(name=self.name) - except Exception as ex: - raise exc.EResourceCreation(type='kubernetes', - message=str(ex)) - data = obj.data - data[self.SECURITY_GROUP] = sg.id - cluster_obj.Cluster.update(ctx, obj.id, {'data': data}) - self._set_security_group_rules(obj, sg.id) - - return sg.id - - def _get_security_group(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - return cluster.data.get(self.SECURITY_GROUP) - return None - - def _set_security_group_rules(self, obj, sgid): - client = self.network(obj) - open_ports = { - 'tcp': [22, 80, 8000, 8080, 6443, 8001, 8443, 443, - 179, 8082, 8086], - 'udp': [8285, 8472], - 'icmp': [None] - } - for p in open_ports.keys(): - for port in open_ports[p]: - try: - client.security_group_rule_create(sgid, port, protocol=p) - except Exception as ex: - raise exc.EResourceCreation(type='kubernetes', - message=str(ex)) - - def _delete_security_group(self, obj): - sgid = obj.data.get(self.SECURITY_GROUP) - if sgid: - try: - self.network(obj).security_group_delete(sgid, - ignore_missing=True) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='kubernetes', - id=sgid, - message=str(ex)) - - def do_validate(self, obj): - """Validate if the spec has provided valid info for server 
creation. - - :param obj: The node object. - """ - # validate flavor - flavor = self.properties[self.FLAVOR] - self._validate_flavor(obj, flavor) - - # validate image - image = self.properties[self.IMAGE] - if image is not None: - self._validate_image(obj, image) - - # validate key_name - keypair = self.properties[self.KEY_NAME] - if keypair is not None: - self._validate_keypair(obj, keypair) - - return True diff --git a/contrib/kubernetes/kube/master.py b/contrib/kubernetes/kube/master.py deleted file mode 100644 index 1f2e99086..000000000 --- a/contrib/kubernetes/kube/master.py +++ /dev/null @@ -1,279 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import base64 - -import jinja2 -from oslo_log import log as logging -from oslo_utils import encodeutils - -from kube import base -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import schema - -LOG = logging.getLogger(__name__) - - -class ServerProfile(base.KubeBaseProfile): - """Profile for an kubernetes master server.""" - - VERSIONS = { - '1.0': [ - {'status': consts.EXPERIMENTAL, 'since': '2017.10'} - ] - } - - KEYS = ( - CONTEXT, FLAVOR, IMAGE, KEY_NAME, - PUBLIC_NETWORK, BLOCK_DEVICE_MAPPING_V2, - ) = ( - 'context', 'flavor', 'image', 'key_name', - 'public_network', 'block_device_mapping_v2', - ) - - INTERNAL_KEYS = ( - KUBEADM_TOKEN, KUBE_MASTER_IP, SECURITY_GROUP, - PRIVATE_NETWORK, PRIVATE_SUBNET, PRIVATE_ROUTER, - KUBE_MASTER_FLOATINGIP, KUBE_MASTER_FLOATINGIP_ID, - SCALE_OUT_RECV_ID, SCALE_OUT_URL, - ) = ( - 'kubeadm_token', 'kube_master_ip', 'security_group', - 'private_network', 'private_subnet', 'private_router', - 'kube_master_floatingip', 'kube_master_floatingip_id', - 'scale_out_recv_id', 'scale_out_url', - ) - - NETWORK_KEYS = ( - PORT, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS, - FLOATING_NETWORK, FLOATING_IP, - ) = ( - 'port', 'fixed_ip', 'network', 'security_groups', - 'floating_network', 'floating_ip', - ) - - BDM2_KEYS = ( - BDM2_UUID, BDM2_SOURCE_TYPE, BDM2_DESTINATION_TYPE, - BDM2_DISK_BUS, BDM2_DEVICE_NAME, BDM2_VOLUME_SIZE, - BDM2_GUEST_FORMAT, BDM2_BOOT_INDEX, BDM2_DEVICE_TYPE, - BDM2_DELETE_ON_TERMINATION, - ) = ( - 'uuid', 'source_type', 'destination_type', 'disk_bus', - 'device_name', 'volume_size', 'guest_format', 'boot_index', - 'device_type', 'delete_on_termination', - ) - - properties_schema = { - CONTEXT: schema.Map( - _('Customized security context for operating servers.'), - ), - FLAVOR: schema.String( - _('ID of flavor used for the server.'), - required=True, - updatable=True, - ), - IMAGE: schema.String( - # IMAGE is not required, because there could be BDM or BDMv2 - # support and the corresponding settings effective - _('ID of image to be used for the new server.'), - updatable=True, - ), - KEY_NAME: schema.String( - _('Name of Nova keypair to be injected to server.'), - ), - PUBLIC_NETWORK: schema.String( - _('Public network for kubernetes.'), - required=True, - ), - BLOCK_DEVICE_MAPPING_V2: schema.List( - _('A list 
specifying the properties of block devices to be used ' - 'for this server.'), - schema=schema.Map( - _('A map specifying the properties of a block device to be ' - 'used by the server.'), - schema={ - BDM2_UUID: schema.String( - _('ID of the source image, snapshot or volume'), - ), - BDM2_SOURCE_TYPE: schema.String( - _("Volume source type, must be one of 'image', " - "'snapshot', 'volume' or 'blank'"), - required=True, - ), - BDM2_DESTINATION_TYPE: schema.String( - _("Volume destination type, must be 'volume' or " - "'local'"), - required=True, - ), - BDM2_DISK_BUS: schema.String( - _('Bus of the device.'), - ), - BDM2_DEVICE_NAME: schema.String( - _('Name of the device(e.g. vda, xda, ....).'), - ), - BDM2_VOLUME_SIZE: schema.Integer( - _('Size of the block device in MB(for swap) and ' - 'in GB(for other formats)'), - required=True, - ), - BDM2_GUEST_FORMAT: schema.String( - _('Specifies the disk file system format(e.g. swap, ' - 'ephemeral, ...).'), - ), - BDM2_BOOT_INDEX: schema.Integer( - _('Define the boot order of the device'), - ), - BDM2_DEVICE_TYPE: schema.String( - _('Type of the device(e.g. disk, cdrom, ...).'), - ), - BDM2_DELETE_ON_TERMINATION: schema.Boolean( - _('Whether to delete the volume when the server ' - 'stops.'), - ), - } - ), - ), - } - - def __init__(self, type_name, name, **kwargs): - super(ServerProfile, self).__init__(type_name, name, **kwargs) - self.server_id = None - - def do_cluster_create(self, obj): - self._generate_kubeadm_token(obj) - self._create_security_group(obj) - self._create_network(obj) - - def do_cluster_delete(self, obj): - if obj.dependents and 'kube-node' in obj.dependents: - msg = ("Cluster %s delete failed, " - "Node clusters %s must be deleted first." % - (obj.id, obj.dependents['kube-node'])) - raise exc.EResourceDeletion(type='kubernetes.master', - id=obj.id, - message=msg) - self._delete_network(obj) - self._delete_security_group(obj) - - def do_create(self, obj): - """Create a server for the node object. - - :param obj: The node object for which a server will be created. 
- """ - kwargs = {} - for key in self.KEYS: - if self.properties[key] is not None: - kwargs[key] = self.properties[key] - - image_ident = self.properties[self.IMAGE] - if image_ident is not None: - image = self._validate_image(obj, image_ident, 'create') - kwargs.pop(self.IMAGE) - kwargs['imageRef'] = image.id - - flavor_ident = self.properties[self.FLAVOR] - flavor = self._validate_flavor(obj, flavor_ident, 'create') - kwargs.pop(self.FLAVOR) - kwargs['flavorRef'] = flavor.id - - keypair_name = self.properties[self.KEY_NAME] - if keypair_name: - keypair = self._validate_keypair(obj, keypair_name, 'create') - kwargs['key_name'] = keypair.name - - kwargs['name'] = obj.name - - metadata = self._build_metadata(obj, {}) - kwargs['metadata'] = metadata - - jj_vars = {} - cluster_data = self._get_cluster_data(obj) - kwargs['networks'] = [{'uuid': cluster_data[self.PRIVATE_NETWORK]}] - - # Get user_data parameters from metadata - jj_vars['KUBETOKEN'] = cluster_data[self.KUBEADM_TOKEN] - jj_vars['MASTER_FLOATINGIP'] = cluster_data[ - self.KUBE_MASTER_FLOATINGIP] - - block_device_mapping_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] - if block_device_mapping_v2 is not None: - kwargs['block_device_mapping_v2'] = self._resolve_bdm( - obj, block_device_mapping_v2, 'create') - - # user_data = self.properties[self.USER_DATA] - user_data = base.loadScript('./scripts/master.sh') - if user_data is not None: - # Use jinja2 to replace variables defined in user_data - try: - jj_t = jinja2.Template(user_data) - user_data = jj_t.render(**jj_vars) - except (jinja2.exceptions.UndefinedError, ValueError) as ex: - # TODO(anyone) Handle jinja2 error - pass - ud = encodeutils.safe_encode(user_data) - kwargs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud)) - - sgid = self._get_security_group(obj) - kwargs['security_groups'] = [{'name': sgid}] - - server = None - resource_id = None - try: - server = self.compute(obj).server_create(**kwargs) - self.compute(obj).wait_for_server(server.id) - server = self.compute(obj).server_get(server.id) - self._update_master_ip(obj, server.addresses[''][0]['addr']) - self._associate_floatingip(obj, server) - LOG.info("Created master node: %s" % server.id) - return server.id - except exc.InternalError as ex: - if server and server.id: - resource_id = server.id - raise exc.EResourceCreation(type='server', - message=str(ex), - resource_id=resource_id) - - def do_delete(self, obj, **params): - """Delete the physical resource associated with the specified node. - - :param obj: The node object to operate on. - :param kwargs params: Optional keyword arguments for the delete - operation. - :returns: This operation always return True unless exception is - caught. - :raises: `EResourceDeletion` if interaction with compute service fails. 
- """ - if not obj.physical_id: - return True - - server_id = obj.physical_id - ignore_missing = params.get('ignore_missing', True) - internal_ports = obj.data.get('internal_ports', []) - force = params.get('force', False) - - try: - self._disassociate_floatingip(obj, server_id) - driver = self.compute(obj) - if force: - driver.server_force_delete(server_id, ignore_missing) - else: - driver.server_delete(server_id, ignore_missing) - driver.wait_for_server_delete(server_id) - if internal_ports: - ex = self._delete_ports(obj, internal_ports) - if ex: - raise ex - return True - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='server', id=server_id, - message=str(ex)) diff --git a/contrib/kubernetes/kube/scripts/master.sh b/contrib/kubernetes/kube/scripts/master.sh deleted file mode 100644 index 84b7bcea1..000000000 --- a/contrib/kubernetes/kube/scripts/master.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh -HOSTNAME=`hostname` -echo "127.0.0.1 $HOSTNAME" >> /etc/hosts -apt-get update && apt-get install -y docker.io curl apt-transport-https -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - -echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list -apt-get update -apt-get install -y kubelet kubeadm kubectl -PODNETWORKCIDR=10.244.0.0/16 -kubeadm init --token {{ KUBETOKEN }} --skip-preflight-checks --pod-network-cidr=$PODNETWORKCIDR --apiserver-cert-extra-sans={{ MASTER_FLOATINGIP}} --token-ttl 0 -mkdir -p $HOME/.kube -cp -i /etc/kubernetes/admin.conf $HOME/.kube/config -chown $(id -u):$(id -g) $HOME/.kube/config -mkdir -p root/.kube -cp -i /etc/kubernetes/admin.conf root/.kube/config -chown root:root root/.kube/config -cp -i /etc/kubernetes/admin.conf /opt/admin.kubeconf -echo "# Setup network pod" -kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.9.0/Documentation/kube-flannel.yml -echo "# Install kubernetes dashboard" -kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml -echo "# Install heapster" -kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/grafana.yaml -kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/heapster.yaml -kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/influxdb.yaml -kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/rbac/heapster-rbac.yaml -echo "# Download monitor script" -curl -o /opt/monitor.sh https://raw.githubusercontent.com/lynic/templates/master/k8s/monitor.sh -chmod a+x /opt/monitor.sh -echo "*/1 * * * * root bash /opt/monitor.sh 2>&1 >> /var/log/kube-minitor.log" > /etc/cron.d/kube-monitor -systemctl restart cron -echo "# Get status" -kubectl get nodes \ No newline at end of file diff --git a/contrib/kubernetes/kube/scripts/worker.sh b/contrib/kubernetes/kube/scripts/worker.sh deleted file mode 100644 index 2bc14cf2a..000000000 --- a/contrib/kubernetes/kube/scripts/worker.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh -HOSTNAME=`hostname` -echo "127.0.0.1 $HOSTNAME" >> /etc/hosts -apt-get update && apt-get install -y docker.io curl apt-transport-https -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - -echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list -apt-get update -apt-get install -y 
kubelet kubeadm kubectl -MASTER_IP={{ MASTERIP }} -kubeadm join --token {{ KUBETOKEN }} --skip-preflight-checks --discovery-token-unsafe-skip-ca-verification $MASTER_IP:6443 diff --git a/contrib/kubernetes/kube/worker.py b/contrib/kubernetes/kube/worker.py deleted file mode 100644 index 26ca641e5..000000000 --- a/contrib/kubernetes/kube/worker.py +++ /dev/null @@ -1,353 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import base64 - -import jinja2 -from oslo_log import log as logging -from oslo_utils import encodeutils - -from kube import base -from senlin.common import consts -from senlin.common import context -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import schema -from senlin.objects import cluster as cluster_obj - -LOG = logging.getLogger(__name__) - - -class ServerProfile(base.KubeBaseProfile): - """Profile for an kubernetes node server.""" - - VERSIONS = { - '1.0': [ - {'status': consts.EXPERIMENTAL, 'since': '2017.10'} - ] - } - - KEYS = ( - CONTEXT, FLAVOR, IMAGE, KEY_NAME, BLOCK_DEVICE_MAPPING_V2 - ) = ( - 'context', 'flavor', 'image', 'key_name', 'block_device_mapping_v2', - ) - - KUBE_KEYS = ( - MASTER_CLUSTER, - ) = ( - 'master_cluster', - ) - - MASTER_CLUSTER_KEYS = ( - KUBEADM_TOKEN, KUBE_MASTER_IP, - PRIVATE_NETWORK, PRIVATE_SUBNET, PRIVATE_ROUTER, - ) = ( - 'kubeadm_token', 'kube_master_ip', - 'private_network', 'private_subnet', 'private_router', - ) - - INTERNAL_KEYS = ( - SECURITY_GROUP, SCALE_OUT_RECV_ID, SCALE_OUT_URL, - ) = ( - 'security_group', 'scale_out_recv_id', 'scale_out_url', - ) - - NETWORK_KEYS = ( - PORT, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS, - FLOATING_NETWORK, FLOATING_IP, - ) = ( - 'port', 'fixed_ip', 'network', 'security_groups', - 'floating_network', 'floating_ip', - ) - - BDM2_KEYS = ( - BDM2_UUID, BDM2_SOURCE_TYPE, BDM2_DESTINATION_TYPE, - BDM2_DISK_BUS, BDM2_DEVICE_NAME, BDM2_VOLUME_SIZE, - BDM2_GUEST_FORMAT, BDM2_BOOT_INDEX, BDM2_DEVICE_TYPE, - BDM2_DELETE_ON_TERMINATION, - ) = ( - 'uuid', 'source_type', 'destination_type', 'disk_bus', - 'device_name', 'volume_size', 'guest_format', 'boot_index', - 'device_type', 'delete_on_termination', - ) - - properties_schema = { - CONTEXT: schema.Map( - _('Customized security context for operating servers.'), - ), - FLAVOR: schema.String( - _('ID of flavor used for the server.'), - required=True, - updatable=True, - ), - IMAGE: schema.String( - # IMAGE is not required, because there could be BDM or BDMv2 - # support and the corresponding settings effective - _('ID of image to be used for the new server.'), - updatable=True, - ), - KEY_NAME: schema.String( - _('Name of Nova keypair to be injected to server.'), - ), - MASTER_CLUSTER: schema.String( - _('Cluster running kubernetes master.'), - required=True, - ), - BLOCK_DEVICE_MAPPING_V2: schema.List( - _('A list specifying the properties of block devices to be used ' - 'for this server.'), - schema=schema.Map( - _('A map specifying the properties of a block device to be ' - 'used by the server.'), - 
schema={ - BDM2_UUID: schema.String( - _('ID of the source image, snapshot or volume'), - ), - BDM2_SOURCE_TYPE: schema.String( - _("Volume source type, must be one of 'image', " - "'snapshot', 'volume' or 'blank'"), - required=True, - ), - BDM2_DESTINATION_TYPE: schema.String( - _("Volume destination type, must be 'volume' or " - "'local'"), - required=True, - ), - BDM2_DISK_BUS: schema.String( - _('Bus of the device.'), - ), - BDM2_DEVICE_NAME: schema.String( - _('Name of the device(e.g. vda, xda, ....).'), - ), - BDM2_VOLUME_SIZE: schema.Integer( - _('Size of the block device in MB(for swap) and ' - 'in GB(for other formats)'), - required=True, - ), - BDM2_GUEST_FORMAT: schema.String( - _('Specifies the disk file system format(e.g. swap, ' - 'ephemeral, ...).'), - ), - BDM2_BOOT_INDEX: schema.Integer( - _('Define the boot order of the device'), - ), - BDM2_DEVICE_TYPE: schema.String( - _('Type of the device(e.g. disk, cdrom, ...).'), - ), - BDM2_DELETE_ON_TERMINATION: schema.Boolean( - _('Whether to delete the volume when the server ' - 'stops.'), - ), - } - ), - ), - } - - def __init__(self, type_name, name, **kwargs): - super(ServerProfile, self).__init__(type_name, name, **kwargs) - self.server_id = None - - def _get_master_cluster_info(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - master = self.properties[self.MASTER_CLUSTER] - try: - cluster = cluster_obj.Cluster.find(ctx, master) - except Exception as ex: - raise exc.EResourceCreation(type='kubernetes.worker', - message=str(ex)) - for key in self.MASTER_CLUSTER_KEYS: - if key not in cluster.data: - raise exc.EResourceCreation( - type='kubernetes.worker', - message="Can't find %s in cluster %s" % (key, master)) - - return cluster.data - - def _set_cluster_dependents(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - master = self.properties[self.MASTER_CLUSTER] - try: - master_cluster = cluster_obj.Cluster.find(ctx, master) - except exc.ResourceNotFound: - msg = _("Cannot find the given cluster: %s") % master - raise exc.BadRequest(msg=msg) - if master_cluster: - # configure kube master dependents, kube master record kube node - # cluster uuid - master_dependents = master_cluster.dependents - master_dependents['kube-node'] = obj.id - cluster_obj.Cluster.update(ctx, master_cluster.id, - {'dependents': master_dependents}) - - def _del_cluster_dependents(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - master = self.properties[self.MASTER_CLUSTER] - try: - master_cluster = cluster_obj.Cluster.find(ctx, master) - except exc.ResourceNotFound: - msg = _("Cannot find the given cluster: %s") % master - raise exc.BadRequest(msg=msg) - - if master_cluster: - # remove kube master record kube node dependents - master_dependents = master_cluster.dependents - if master_dependents and 'kube-node' in master_dependents: - master_dependents.pop('kube-node') - cluster_obj.Cluster.update(ctx, master_cluster.id, - {'dependents': master_dependents}) - - def _get_cluster_data(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - return cluster.data - - return {} - - def do_cluster_create(self, obj): - self._create_security_group(obj) - self._set_cluster_dependents(obj) - - def do_cluster_delete(self, obj): - self._delete_security_group(obj) - self._del_cluster_dependents(obj) - - def do_validate(self, 
obj): - """Validate if the spec has provided valid info for server creation. - - :param obj: The node object. - """ - # validate flavor - flavor = self.properties[self.FLAVOR] - self._validate_flavor(obj, flavor) - - # validate image - image = self.properties[self.IMAGE] - if image is not None: - self._validate_image(obj, image) - - # validate key_name - keypair = self.properties[self.KEY_NAME] - if keypair is not None: - self._validate_keypair(obj, keypair) - - return True - - def do_create(self, obj): - """Create a server for the node object. - - :param obj: The node object for which a server will be created. - """ - kwargs = {} - for key in self.KEYS: - if self.properties[key] is not None: - kwargs[key] = self.properties[key] - - image_ident = self.properties[self.IMAGE] - if image_ident is not None: - image = self._validate_image(obj, image_ident, 'create') - kwargs.pop(self.IMAGE) - kwargs['imageRef'] = image.id - - flavor_ident = self.properties[self.FLAVOR] - flavor = self._validate_flavor(obj, flavor_ident, 'create') - kwargs.pop(self.FLAVOR) - kwargs['flavorRef'] = flavor.id - - keypair_name = self.properties[self.KEY_NAME] - if keypair_name: - keypair = self._validate_keypair(obj, keypair_name, 'create') - kwargs['key_name'] = keypair.name - - kwargs['name'] = obj.name - - metadata = self._build_metadata(obj, {}) - kwargs['metadata'] = metadata - - sgid = self._get_security_group(obj) - kwargs['security_groups'] = [{'name': sgid}] - - jj_vars = {} - master_cluster = self._get_master_cluster_info(obj) - kwargs['networks'] = [{'uuid': master_cluster[self.PRIVATE_NETWORK]}] - jj_vars['KUBETOKEN'] = master_cluster[self.KUBEADM_TOKEN] - jj_vars['MASTERIP'] = master_cluster[self.KUBE_MASTER_IP] - - block_device_mapping_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] - if block_device_mapping_v2 is not None: - kwargs['block_device_mapping_v2'] = self._resolve_bdm( - obj, block_device_mapping_v2, 'create') - - user_data = base.loadScript('./scripts/worker.sh') - if user_data is not None: - # Use jinja2 to replace variables defined in user_data - try: - jj_t = jinja2.Template(user_data) - user_data = jj_t.render(**jj_vars) - except (jinja2.exceptions.UndefinedError, ValueError) as ex: - # TODO(anyone) Handle jinja2 error - pass - ud = encodeutils.safe_encode(user_data) - kwargs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud)) - - server = None - resource_id = None - try: - server = self.compute(obj).server_create(**kwargs) - self.compute(obj).wait_for_server(server.id) - server = self.compute(obj).server_get(server.id) - return server.id - except exc.InternalError as ex: - if server and server.id: - resource_id = server.id - raise exc.EResourceCreation(type='server', - message=str(ex), - resource_id=resource_id) - - def do_delete(self, obj, **params): - """Delete the physical resource associated with the specified node. - - :param obj: The node object to operate on. - :param dict params: Optional keyword arguments for the delete - operation. - :returns: This operation always returns True unless an exception is - caught. - :raises: `EResourceDeletion` if interaction with compute service fails.
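- Recognized keys in params are 'ignore_missing' (defaults to True) - and 'force' (defaults to False); see the method body below.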
- """ - if not obj.physical_id: - return True - - server_id = obj.physical_id - ignore_missing = params.get('ignore_missing', True) - internal_ports = obj.data.get('internal_ports', []) - force = params.get('force', False) - - try: - driver = self.compute(obj) - if force: - driver.server_force_delete(server_id, ignore_missing) - else: - driver.server_delete(server_id, ignore_missing) - driver.wait_for_server_delete(server_id) - if internal_ports: - ex = self._delete_ports(obj, internal_ports) - if ex: - raise ex - return True - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='server', id=server_id, - message=str(ex)) diff --git a/contrib/kubernetes/requirements.txt b/contrib/kubernetes/requirements.txt deleted file mode 100644 index 98592e47d..000000000 --- a/contrib/kubernetes/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Jinja2>=2.8,!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4 # BSD License (3 clause) \ No newline at end of file diff --git a/contrib/kubernetes/setup.cfg b/contrib/kubernetes/setup.cfg deleted file mode 100644 index d525be9c4..000000000 --- a/contrib/kubernetes/setup.cfg +++ /dev/null @@ -1,28 +0,0 @@ -[metadata] -name = senlin-kubernetes -summary = Kubernetes profile for senlin -description-file = - README.rst -author = OpenStack -author-email = openstack-discuss@lists.openstack.org -home-page = https://docs.openstack.org/senlin/latest/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - -[entry_points] -senlin.profiles = - senlin.kubernetes.master-1.0 = kube.master:ServerProfile - senlin.kubernetes.worker-1.0 = kube.worker:ServerProfile - -[global] -setup-hooks = - pbr.hooks.setup_hook diff --git a/contrib/kubernetes/setup.py b/contrib/kubernetes/setup.py deleted file mode 100644 index 98b93ebc5..000000000 --- a/contrib/kubernetes/setup.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/contrib/vdu/README.rst b/contrib/vdu/README.rst deleted file mode 100644 index f7ae358b6..000000000 --- a/contrib/vdu/README.rst +++ /dev/null @@ -1,13 +0,0 @@ -# VDU Profile for NFV - -## Install -```bash -pip install --editable . -``` - -## Usage -```bash -. 
openrc demo demo -senlin profile-create vdu-profile -s examples/vdu.yaml -senlin cluster-create vdu-cluster -p vdu-profile -M config='{"word": "world"}' -c 1 -``` diff --git a/contrib/vdu/examples/vdu.yaml b/contrib/vdu/examples/vdu.yaml deleted file mode 100644 index 8daee4db2..000000000 --- a/contrib/vdu/examples/vdu.yaml +++ /dev/null @@ -1,17 +0,0 @@ -type: os.senlin.vdu -version: 1.0 -properties: - flavor: m1.tiny - image: "cirros-0.3.4-x86_64-uec" - networks: - - network: private - security_groups: - - default - floating_network: public - metadata: - test_key: test_value - user_data: | - #!/bin/sh - echo 'hello, {{ word }}' - echo '{{ ports.0.fixed_ips.0.ip_address }}' - echo '{{ ports.0.floating_ip_address }}' diff --git a/contrib/vdu/requirements.txt b/contrib/vdu/requirements.txt deleted file mode 100644 index 98592e47d..000000000 --- a/contrib/vdu/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Jinja2>=2.8,!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4 # BSD License (3 clause) \ No newline at end of file diff --git a/contrib/vdu/setup.cfg b/contrib/vdu/setup.cfg deleted file mode 100644 index 58e92cf15..000000000 --- a/contrib/vdu/setup.cfg +++ /dev/null @@ -1,31 +0,0 @@ -[metadata] -name = senlin-vdu -summary = VDU profile for senlin -description-file = - README.rst -author = OpenStack -author-email = openstack-discuss@lists.openstack.org -home-page = https://docs.openstack.org/senlin/latest/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - -# [files] -# packages = -# senlin-vdu - -[entry_points] -senlin.profiles = - os.senlin.vdu-1.0 = vdu.server:ServerProfile - -[global] -setup-hooks = - pbr.hooks.setup_hook diff --git a/contrib/vdu/setup.py b/contrib/vdu/setup.py deleted file mode 100644 index 736375744..000000000 --- a/contrib/vdu/setup.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. 
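-# The eager 'import multiprocessing' below works around that problem;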
-# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/contrib/vdu/vdu/__init__.py b/contrib/vdu/vdu/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/contrib/vdu/vdu/server.py b/contrib/vdu/vdu/server.py deleted file mode 100644 index 43500b416..000000000 --- a/contrib/vdu/vdu/server.py +++ /dev/null @@ -1,1469 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import base64 -import copy - -import jinja2 -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils - -from senlin.common import constraints -from senlin.common import consts -from senlin.common import context -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import schema -from senlin.objects import cluster as cluster_obj -from senlin.objects import node as node_obj -from senlin.profiles import base - -LOG = logging.getLogger(__name__) - - -class ServerProfile(base.Profile): - """Profile for an OpenStack Nova server.""" - - VERSIONS = { - '1.0': [ - {'status': consts.SUPPORTED, 'since': '2016.04'} - ] - } - - KEYS = ( - CONTEXT, ADMIN_PASS, AUTO_DISK_CONFIG, AVAILABILITY_ZONE, - BLOCK_DEVICE_MAPPING_V2, - CONFIG_DRIVE, FLAVOR, IMAGE, KEY_NAME, METADATA, - NAME, NETWORKS, PERSONALITY, SECURITY_GROUPS, - USER_DATA, SCHEDULER_HINTS, - ) = ( - 'context', 'admin_pass', 'auto_disk_config', 'availability_zone', - 'block_device_mapping_v2', - 'config_drive', 'flavor', 'image', 'key_name', 'metadata', - 'name', 'networks', 'personality', 'security_groups', - 'user_data', 'scheduler_hints', - ) - - BDM2_KEYS = ( - BDM2_UUID, BDM2_SOURCE_TYPE, BDM2_DESTINATION_TYPE, - BDM2_DISK_BUS, BDM2_DEVICE_NAME, BDM2_VOLUME_SIZE, - BDM2_GUEST_FORMAT, BDM2_BOOT_INDEX, BDM2_DEVICE_TYPE, - BDM2_DELETE_ON_TERMINATION, - ) = ( - 'uuid', 'source_type', 'destination_type', 'disk_bus', - 'device_name', 'volume_size', 'guest_format', 'boot_index', - 'device_type', 'delete_on_termination', - ) - - NETWORK_KEYS = ( - PORT, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS, - FLOATING_NETWORK, FLOATING_IP, - ) = ( - 'port', 'fixed_ip', 'network', 'security_groups', - 'floating_network', 'floating_ip', - ) - - PERSONALITY_KEYS = ( - PATH, CONTENTS, - ) = ( - 'path', 'contents', - ) - - SCHEDULER_HINTS_KEYS = ( - GROUP, - ) = ( - 'group', - ) - - properties_schema = { - CONTEXT: schema.Map( - _('Customized security context for operating servers.'), - ), - ADMIN_PASS: schema.String( - _('Password for the administrator account.'), - ), - AUTO_DISK_CONFIG: schema.Boolean( - _('Whether the disk partition is done automatically.'), - default=True, - ), - AVAILABILITY_ZONE: schema.String( - _('Name of availability zone for running the server.'), - ), - BLOCK_DEVICE_MAPPING_V2: schema.List( - _('A list specifying the properties of block devices to be used ' - 'for this server.'), - schema=schema.Map( - _('A 
map specifying the properties of a block device to be ' - 'used by the server.'), - schema={ - BDM2_UUID: schema.String( - _('ID of the source image, snapshot or volume'), - ), - BDM2_SOURCE_TYPE: schema.String( - _('Volume source type, should be image, snapshot, ' - 'volume or blank'), - required=True, - ), - BDM2_DESTINATION_TYPE: schema.String( - _('Volume destination type, should be volume or ' - 'local'), - required=True, - ), - BDM2_DISK_BUS: schema.String( - _('Bus of the device.'), - ), - BDM2_DEVICE_NAME: schema.String( - _('Name of the device (e.g. vda, xda, ...).'), - ), - BDM2_VOLUME_SIZE: schema.Integer( - _('Size of the block device in MB (for swap) and ' - 'in GB (for other formats).'), - required=True, - ), - BDM2_GUEST_FORMAT: schema.String( - _('Specifies the disk file system format (e.g. swap, ' - 'ephemeral, ...).'), - ), - BDM2_BOOT_INDEX: schema.Integer( - _('Defines the boot order of the device.'), - ), - BDM2_DEVICE_TYPE: schema.String( - _('Type of the device (e.g. disk, cdrom, ...).'), - ), - BDM2_DELETE_ON_TERMINATION: schema.Boolean( - _('Whether to delete the volume when the server ' - 'stops.'), - ), - } - ), - ), - CONFIG_DRIVE: schema.Boolean( - _('Whether config drive should be enabled for the server.'), - ), - FLAVOR: schema.String( - _('ID of flavor used for the server.'), - required=True, - updatable=True, - ), - IMAGE: schema.String( - # IMAGE is not required, because there could be BDM or BDMv2 - # support and the corresponding settings effective - _('ID of image to be used for the new server.'), - updatable=True, - ), - KEY_NAME: schema.String( - _('Name of Nova keypair to be injected to server.'), - ), - METADATA: schema.Map( - _('A collection of key/value pairs to be associated with the ' - 'server created. Both key and value should be <=255 chars.'), - updatable=True, - ), - NAME: schema.String( - _('Name of the server. When omitted, the node name will be used.'), - updatable=True, - ), - NETWORKS: schema.List( - _('List of networks for the server.'), - schema=schema.Map( - _('A map specifying the properties of a network to be used.'), - schema={ - NETWORK: schema.String( - _('Name or ID of network to create a port on.'), - ), - PORT: schema.String( - _('Port ID to be used by the network.'), - ), - FIXED_IP: schema.String( - _('Fixed IP to be used by the network.'), - ), - PORT_SECURITY_GROUPS: schema.List( - _('A list of security groups to be attached to ' - 'this port.'), - schema=schema.String( - _('Name of a security group'), - required=True, - ), - ), - FLOATING_NETWORK: schema.String( - _('The network on which to create a floating IP.'), - ), - FLOATING_IP: schema.String( - _('The floating IP address to be associated with ' - 'this port.'), - ), - }, - ), - updatable=True, - ), - PERSONALITY: schema.List( - _('List of files to be injected into the server, where each ' - 'entry is a map describing an injected file.'), - schema=schema.Map( - _('A map specifying the path & contents for an injected ' - 'file.'), - schema={ - PATH: schema.String( - _('In-instance path for the file to be injected.'), - required=True, - ), - CONTENTS: schema.String( - _('Contents of the file to be injected.'), - required=True, - ), - }, - ), - ), - SCHEDULER_HINTS: schema.Map( - _('A collection of key/value pairs to be associated with the ' - 'Scheduler hints.
Both key and value should be <=255 chars.'), - ), - - SECURITY_GROUPS: schema.List( - _('List of security groups.'), - schema=schema.String( - _('Name of a security group'), - required=True, - ), - ), - USER_DATA: schema.String( - _('User data to be exposed by the metadata server.'), - ), - } - - OP_NAMES = ( - OP_REBOOT, OP_REBUILD, OP_CHANGE_PASSWORD, OP_PAUSE, OP_UNPAUSE, - OP_SUSPEND, OP_RESUME, OP_LOCK, OP_UNLOCK, OP_START, OP_STOP, - OP_RESCUE, OP_UNRESCUE, OP_EVACUATE, - ) = ( - 'reboot', 'rebuild', 'change_password', 'pause', 'unpause', - 'suspend', 'resume', 'lock', 'unlock', 'start', 'stop', - 'rescue', 'unrescue', 'evacuate', - ) - - REBOOT_TYPE = 'type' - REBOOT_TYPES = (REBOOT_SOFT, REBOOT_HARD) = ('SOFT', 'HARD') - ADMIN_PASSWORD = 'admin_pass' - RESCUE_IMAGE = 'image_ref' - EVACUATE_OPTIONS = ( - EVACUATE_HOST, EVACUATE_FORCE - ) = ( - 'host', 'force' - ) - - OPERATIONS = { - OP_REBOOT: schema.Operation( - _("Reboot the nova server."), - schema={ - REBOOT_TYPE: schema.StringParam( - _("Type of reboot which can be 'SOFT' or 'HARD'."), - default=REBOOT_SOFT, - constraints=[ - constraints.AllowedValues(REBOOT_TYPES), - ] - ) - } - ), - OP_REBUILD: schema.Operation( - _("Rebuild the server using current image and admin password."), - ), - OP_CHANGE_PASSWORD: schema.Operation( - _("Change the administrator password."), - schema={ - ADMIN_PASSWORD: schema.StringParam( - _("New password for the administrator.") - ) - } - ), - OP_PAUSE: schema.Operation( - _("Pause the server from running."), - ), - OP_UNPAUSE: schema.Operation( - _("Unpause the server to running state."), - ), - OP_SUSPEND: schema.Operation( - _("Suspend the running of the server."), - ), - OP_RESUME: schema.Operation( - _("Resume the running of the server."), - ), - OP_LOCK: schema.Operation( - _("Lock the server."), - ), - OP_UNLOCK: schema.Operation( - _("Unlock the server."), - ), - OP_START: schema.Operation( - _("Start the server."), - ), - OP_STOP: schema.Operation( - _("Stop the server."), - ), - OP_RESCUE: schema.Operation( - _("Rescue the server."), - schema={ - RESCUE_IMAGE: schema.StringParam( - _("A string referencing the image to use."), - ), - } - ), - OP_UNRESCUE: schema.Operation( - _("Unrescue the server."), - ), - OP_EVACUATE: schema.Operation( - _("Evacuate the server to a different host."), - schema={ - EVACUATE_HOST: schema.StringParam( - _("The target host to evacuate the server."), - ), - EVACUATE_FORCE: schema.StringParam( - _("Whether the evacuation should be a forced one.") - ) - } - ) - } - - def __init__(self, type_name, name, **kwargs): - super(ServerProfile, self).__init__(type_name, name, **kwargs) - self.server_id = None - - def _validate_az(self, obj, az_name, reason=None): - try: - res = self.compute(obj).validate_azs([az_name]) - except exc.InternalError as ex: - if reason == 'create': - raise exc.EResourceCreation(type='server', - message=str(ex)) - else: - raise - - if not res: - msg = _("The specified %(key)s '%(value)s' could not be found" - ) % {'key': self.AVAILABILITY_ZONE, 'value': az_name} - if reason == 'create': - raise exc.EResourceCreation(type='server', message=msg) - else: - raise exc.InvalidSpec(message=msg) - - return az_name - - def _validate_flavor(self, obj, name_or_id, reason=None): - flavor = None - msg = '' - try: - flavor = self.compute(obj).flavor_find(name_or_id, False) - except exc.InternalError as ex: - msg = str(ex) - if reason is None: # reason is 'validate' - if ex.code == 404: - msg = _("The specified %(k)s '%(v)s' could not be found." 
- ) % {'k': self.FLAVOR, 'v': name_or_id} - raise exc.InvalidSpec(message=msg) - else: - raise - - if flavor is not None: - if not flavor.is_disabled: - return flavor - msg = _("The specified %(k)s '%(v)s' is disabled" - ) % {'k': self.FLAVOR, 'v': name_or_id} - - if reason == 'create': - raise exc.EResourceCreation(type='server', message=msg) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=msg) - else: - raise exc.InvalidSpec(message=msg) - - def _validate_image(self, obj, name_or_id, reason=None): - try: - return self.compute(obj).image_find(name_or_id, False) - except exc.InternalError as ex: - if reason == 'create': - raise exc.EResourceCreation(type='server', - message=str(ex)) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - elif ex.code == 404: - msg = _("The specified %(k)s '%(v)s' could not be found." - ) % {'k': self.IMAGE, 'v': name_or_id} - raise exc.InvalidSpec(message=msg) - else: - raise - - def _validate_keypair(self, obj, name_or_id, reason=None): - try: - return self.compute(obj).keypair_find(name_or_id, False) - except exc.InternalError as ex: - if reason == 'create': - raise exc.EResourceCreation(type='server', - message=str(ex)) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - elif ex.code == 404: - msg = _("The specified %(k)s '%(v)s' could not be found." - ) % {'k': self.KEY_NAME, 'v': name_or_id} - raise exc.InvalidSpec(message=msg) - else: - raise - - def do_validate(self, obj): - """Validate if the spec has provided valid info for server creation. - - :param obj: The node object. - """ - # validate availability_zone - az_name = self.properties[self.AVAILABILITY_ZONE] - if az_name is not None: - self._validate_az(obj, az_name) - - # validate flavor - flavor = self.properties[self.FLAVOR] - self._validate_flavor(obj, flavor) - - # validate image - image = self.properties[self.IMAGE] - if image is not None: - self._validate_image(obj, image) - - # validate key_name - keypair = self.properties[self.KEY_NAME] - if keypair is not None: - self._validate_keypair(obj, keypair) - - # validate networks - networks = self.properties[self.NETWORKS] - for net in networks: - self._validate_network(obj, net) - - return True - - def _resolve_bdm(self, bdm): - for bd in bdm: - for key in self.BDM2_KEYS: - if bd[key] is None: - del bd[key] - return bdm - - def _check_security_groups(self, nc, net_spec, result): - """Check security groups. - - :param nc: network driver connection. - :param net_spec: the specification to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. - """ - sgs = net_spec.get(self.PORT_SECURITY_GROUPS) - if not sgs: - return - - res = [] - try: - for sg in sgs: - sg_obj = nc.security_group_find(sg) - res.append(sg_obj.id) - except exc.InternalError as ex: - return str(ex) - - result[self.PORT_SECURITY_GROUPS] = res - return - - def _check_network(self, nc, net, result): - """Check the specified network. - - :param nc: network driver connection. - :param net: the name or ID of network to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. 
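- On success, the resolved network ID is stored in result under the - 'network' key.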
- """ - if net is None: - return - try: - net_obj = nc.network_get(net) - result[self.NETWORK] = net_obj.id - except exc.InternalError as ex: - return str(ex) - - def _check_port(self, nc, port, result): - """Check the specified port. - - :param nc: network driver connection. - :param port: the name or ID of port to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. - """ - if port is None: - return - - try: - port_obj = nc.port_find(port) - if port_obj.status != 'DOWN': - return _("The status of the port %(p)s must be DOWN" - ) % {'p': port} - result[self.PORT] = port_obj.id - return - except exc.InternalError as ex: - return str(ex) - - def _check_floating_ip(self, nc, net_spec, result): - """Check floating IP and network, if specified. - - :param nc: network driver connection. - :param net_spec: the specification to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. - """ - net = net_spec.get(self.FLOATING_NETWORK) - if net: - try: - net_obj = nc.network_get(net) - result[self.FLOATING_NETWORK] = net_obj.id - except exc.InternalError as ex: - return str(ex) - - flt_ip = net_spec.get(self.FLOATING_IP) - if not flt_ip: - return - - try: - # Find floating ip with this address - fip = nc.floatingip_find(flt_ip) - if fip: - if fip.status == 'ACTIVE': - return _('the floating IP %s has been used.') % flt_ip - result['floating_ip_id'] = fip.id - return - - # Create a floating IP with address if floating ip unspecified - if not net: - return _('Must specify a network to create a floating IP') - - result[self.FLOATING_IP] = flt_ip - return - except exc.InternalError as ex: - return str(ex) - - def _validate_network(self, obj, net_spec, reason=None): - - def _verify(error): - if error is None: - return - - if reason == 'create': - raise exc.EResourceCreation(type='server', message=error) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=error) - else: - raise exc.InvalidSpec(message=error) - - nc = self.network(obj) - result = {} - - # check network - net = net_spec.get(self.NETWORK) - error = self._check_network(nc, net, result) - _verify(error) - - # check port - port = net_spec.get(self.PORT) - error = self._check_port(nc, port, result) - _verify(error) - - if port is None and net is None: - _verify(_("One of '%(p)s' and '%(n)s' must be provided" - ) % {'p': self.PORT, 'n': self.NETWORK}) - - fixed_ip = net_spec.get(self.FIXED_IP) - if fixed_ip: - if port is not None: - _verify(_("The '%(p)s' property and the '%(fip)s' property " - "cannot be specified at the same time" - ) % {'p': self.PORT, 'fip': self.FIXED_IP}) - result[self.FIXED_IP] = fixed_ip - - # Check security_groups - error = self._check_security_groups(nc, net_spec, result) - _verify(error) - - # Check floating IP - error = self._check_floating_ip(nc, net_spec, result) - _verify(error) - - return result - - def _get_port(self, obj, net_spec): - """Fetch or create a port. - - :param obj: The node object. - :param net_spec: The parameters to create a port. - :returns: Created port object and error message. 
- """ - port_id = net_spec.get(self.PORT, None) - if port_id: - try: - port = self.network(obj).port_find(port_id) - return port, None - except exc.InternalError as ex: - return None, ex - port_attr = { - 'network_id': net_spec.get(self.NETWORK), - } - fixed_ip = net_spec.get(self.FIXED_IP, None) - if fixed_ip: - port_attr['fixed_ips'] = [fixed_ip] - security_groups = net_spec.get(self.PORT_SECURITY_GROUPS, []) - if security_groups: - port_attr['security_groups'] = security_groups - try: - port = self.network(obj).port_create(**port_attr) - return port, None - except exc.InternalError as ex: - return None, ex - - def _delete_ports(self, obj, ports): - """Delete ports. - - :param obj: The node object - :param ports: A list of internal ports. - :returns: None for succeed or error for failure. - """ - for port in ports: - # remove port created by senlin - if port.get('remove', False): - try: - self.network(obj).port_delete(port['id']) - # remove floating IP created by senlin - if port.get('floating', None) and port[ - 'floating'].get('remove', False): - self.network(obj).floatingip_delete( - port['floating']['id']) - except exc.InternalError as ex: - return ex - ports.remove(port) - node_data = obj.data - node_data['internal_ports'] = ports - node_obj.Node.update(self.context, obj.id, {'data': node_data}) - - def _get_floating_ip(self, obj, fip_spec, port_id): - """Find or Create a floating IP. - - :param obj: The node object. - :param fip_spec: The parameters to create a floating ip - :param port_id: The port ID to associate with - :returns: A floating IP object and error message. - """ - floating_ip_id = fip_spec.get('floating_ip_id', None) - if floating_ip_id: - try: - fip = self.network(obj).floatingip_find(floating_ip_id) - if fip.port_id is None: - attr = {'port_id': port_id} - fip = self.network(obj).floatingip_update(fip, **attr) - return fip, None - except exc.InternalError as ex: - return None, ex - net_id = fip_spec.get(self.FLOATING_NETWORK) - fip_addr = fip_spec.get(self.FLOATING_IP) - attr = { - 'port_id': port_id, - 'floating_network_id': net_id, - } - if fip_addr: - attr.update({'floating_ip_address': fip_addr}) - try: - fip = self.network(obj).floatingip_create(**attr) - return fip, None - except exc.InternalError as ex: - return None, ex - - def _create_ports_from_properties(self, obj, networks, action_type): - """Create or find ports based on networks property. - - :param obj: The node object. - :param networks: The networks property used for node. - :param action_type: Either 'create' or 'update'. - - :returns: A list of created port's attributes. 
- """ - internal_ports = obj.data.get('internal_ports', []) - if not networks: - return [] - - for net_spec in networks: - net = self._validate_network(obj, net_spec, action_type) - # Create port - port, ex = self._get_port(obj, net) - if ex: - d_ex = self._delete_ports(obj, internal_ports) - if d_ex: - raise d_ex - else: - raise ex - port_attrs = { - 'id': port.id, - 'network_id': port.network_id, - 'security_group_ids': port.security_group_ids, - 'fixed_ips': port.fixed_ips - } - if self.PORT not in net: - port_attrs.update({'remove': True}) - # Create floating ip - if 'floating_ip_id' in net or self.FLOATING_NETWORK in net: - fip, ex = self._get_floating_ip(obj, net, port_attrs['id']) - if ex: - d_ex = self._delete_ports(obj, internal_ports) - if d_ex: - raise d_ex - else: - raise ex - port_attrs['floating'] = { - 'id': fip.id, - 'floating_ip_address': fip.floating_ip_address, - 'floating_network_id': fip.floating_network_id, - } - if self.FLOATING_NETWORK in net: - port_attrs['floating'].update({'remove': True}) - internal_ports.append(port_attrs) - if internal_ports: - node_data = obj.data - node_data.update(internal_ports=internal_ports) - node_obj.Node.update(self.context, obj.id, {'data': node_data}) - return internal_ports - - def _build_metadata(self, obj, usermeta): - """Build custom metadata for server. - - :param obj: The node object to operate on. - :return: A dictionary containing the new metadata. - """ - metadata = usermeta or {} - metadata['cluster_node_id'] = obj.id - if obj.cluster_id: - metadata['cluster_id'] = obj.cluster_id - metadata['cluster_node_index'] = str(obj.index) - - return metadata - - def _update_zone_info(self, obj, server): - """Update the actual zone placement data. - - :param obj: The node object associated with this server. - :param server: The server object returned from creation. - """ - if server.availability_zone: - placement = obj.data.get('placement', None) - if not placement: - obj.data['placement'] = {'zone': server.availability_zone} - else: - obj.data['placement'].setdefault('zone', - server.availability_zone) - # It is safe to use admin context here - ctx = context.get_admin_context() - node_obj.Node.update(ctx, obj.id, {'data': obj.data}) - - def _preprocess_user_data(self, obj, extra=None): - """Get jinja2 parameters from metadata config. - - :param obj: The node object. - :param extra: The existing parameters to be merged. - :returns: jinja2 parameters to be used. - """ - def _to_json(astr): - try: - ret = jsonutils.loads(astr) - return ret - except (ValueError, TypeError): - return astr - - extra = extra or {} - n_config = _to_json(obj.metadata.get('config', {})) - # Check node's metadata - if isinstance(n_config, dict): - extra.update(n_config) - # Check cluster's metadata - if obj.cluster_id: - ctx = context.get_service_context( - user=obj.user, project=obj.project) - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - c_config = _to_json(cluster.metadata.get('config', {})) - if isinstance(c_config, dict): - extra.update(c_config) - return extra - - def do_create(self, obj): - """Create a server for the node object. - - :param obj: The node object for which a server will be created. 
- """ - kwargs = {} - for key in self.KEYS: - # context is treated as connection parameters - if key == self.CONTEXT: - continue - - if self.properties[key] is not None: - kwargs[key] = self.properties[key] - - admin_pass = self.properties[self.ADMIN_PASS] - if admin_pass: - kwargs.pop(self.ADMIN_PASS) - kwargs['adminPass'] = admin_pass - - auto_disk_config = self.properties[self.AUTO_DISK_CONFIG] - kwargs.pop(self.AUTO_DISK_CONFIG) - kwargs['OS-DCF:diskConfig'] = 'AUTO' if auto_disk_config else 'MANUAL' - - image_ident = self.properties[self.IMAGE] - if image_ident is not None: - image = self._validate_image(obj, image_ident, 'create') - kwargs.pop(self.IMAGE) - kwargs['imageRef'] = image.id - - flavor_ident = self.properties[self.FLAVOR] - flavor = self._validate_flavor(obj, flavor_ident, 'create') - kwargs.pop(self.FLAVOR) - kwargs['flavorRef'] = flavor.id - - keypair_name = self.properties[self.KEY_NAME] - if keypair_name: - keypair = self._validate_keypair(obj, keypair_name, 'create') - kwargs['key_name'] = keypair.name - - kwargs['name'] = self.properties[self.NAME] or obj.name - - metadata = self._build_metadata(obj, self.properties[self.METADATA]) - kwargs['metadata'] = metadata - - block_device_mapping_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] - if block_device_mapping_v2 is not None: - kwargs['block_device_mapping_v2'] = self._resolve_bdm( - block_device_mapping_v2) - - jj_vars = {} - networks = self.properties[self.NETWORKS] - if networks is not None: - ports = self._create_ports_from_properties( - obj, networks, 'create') - jj_vars['ports'] = ports - kwargs['networks'] = [ - {'port': port['id']} for port in ports] - - # Get user_data parameters from metadata - jj_vars = self._preprocess_user_data(obj, jj_vars) - - user_data = self.properties[self.USER_DATA] - if user_data is not None: - # Use jinja2 to replace variables defined in user_data - try: - jj_t = jinja2.Template(user_data) - user_data = jj_t.render(**jj_vars) - except (jinja2.exceptions.UndefinedError, ValueError) as ex: - # TODO(anyone) Handle jinja2 error - pass - ud = encodeutils.safe_encode(user_data) - kwargs['user_data'] = encodeutils.safe_decode( - base64.b64encode(ud)) - - secgroups = self.properties[self.SECURITY_GROUPS] - if secgroups: - kwargs['security_groups'] = [{'name': sg} for sg in secgroups] - - if 'placement' in obj.data: - if 'zone' in obj.data['placement']: - kwargs['availability_zone'] = obj.data['placement']['zone'] - - if 'servergroup' in obj.data['placement']: - group_id = obj.data['placement']['servergroup'] - hints = self.properties.get(self.SCHEDULER_HINTS, {}) - hints.update({'group': group_id}) - kwargs['scheduler_hints'] = hints - - server = None - resource_id = 'UNKNOWN' - try: - server = self.compute(obj).server_create(**kwargs) - self.compute(obj).wait_for_server(server.id) - # Update zone placement info if available - self._update_zone_info(obj, server) - return server.id - except exc.InternalError as ex: - if server and server.id: - resource_id = server.id - raise exc.EResourceCreation(type='server', - message=str(ex), - resource_id=resource_id) - - def do_delete(self, obj, **params): - """Delete the physical resource associated with the specified node. - - :param obj: The node object to operate on. - :param kwargs params: Optional keyword arguments for the delete - operation. - :returns: This operation always return True unless exception is - caught. - :raises: `EResourceDeletion` if interaction with compute service fails. 
- """ - internal_ports = obj.data.get('internal_ports', []) - if not obj.physical_id: - return True - - server_id = obj.physical_id - ignore_missing = params.get('ignore_missing', True) - force = params.get('force', False) - - try: - driver = self.compute(obj) - if force: - driver.server_force_delete(server_id, ignore_missing) - else: - driver.server_delete(server_id, ignore_missing) - driver.wait_for_server_delete(server_id) - if internal_ports: - ex = self._delete_ports(obj, internal_ports) - if ex: - raise ex - return True - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='server', id=server_id, - message=str(ex)) - - def _check_server_name(self, obj, profile): - """Check if there is a new name to be assigned to the server. - - :param obj: The node object to operate on. - :param new_profile: The new profile which may contain a name for - the server instance. - :return: A tuple consisting a boolean indicating whether the name - needs change and the server name determined. - """ - old_name = self.properties[self.NAME] or obj.name - new_name = profile.properties[self.NAME] or obj.name - if old_name == new_name: - return False, new_name - return True, new_name - - def _update_name(self, obj, new_name): - """Update the name of the server. - - :param obj: The node object to operate. - :param new_name: The new name for the server instance. - :return: ``None``. - :raises: ``EResourceUpdate``. - """ - try: - self.compute(obj).server_update(obj.physical_id, name=new_name) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - def _check_password(self, obj, new_profile): - """Check if the admin password has been changed in the new profile. - - :param obj: The server node to operate, not used currently. - :param new_profile: The new profile which may contain a new password - for the server instance. - :return: A tuple consisting a boolean indicating whether the password - needs a change and the password determined which could be - '' if new password is not set. - """ - old_passwd = self.properties.get(self.ADMIN_PASS) or '' - new_passwd = new_profile.properties[self.ADMIN_PASS] or '' - if old_passwd == new_passwd: - return False, new_passwd - return True, new_passwd - - def _update_password(self, obj, new_password): - """Update the admin password for the server. - - :param obj: The node object to operate. - :param new_password: The new password for the server instance. - :return: ``None``. - :raises: ``EResourceUpdate``. - """ - try: - self.compute(obj).server_change_password(obj.physical_id, - new_password) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - def _update_metadata(self, obj, new_profile): - """Update the server metadata. - - :param obj: The node object to operate on. - :param new_profile: The new profile that may contain some changes to - the metadata. - :returns: ``None`` - :raises: `EResourceUpdate`. - """ - old_meta = self._build_metadata(obj, self.properties[self.METADATA]) - new_meta = self._build_metadata(obj, - new_profile.properties[self.METADATA]) - if new_meta == old_meta: - return - - try: - self.compute(obj).server_metadata_update(obj.physical_id, new_meta) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - def _update_flavor(self, obj, new_profile): - """Update server flavor. - - :param obj: The node object to operate on. 
- :param new_profile: The new profile which may contain a new flavor - for the server. - :returns: ``None``. - :raises: `EResourceUpdate` if the operation fails. - """ - old_flavor = self.properties[self.FLAVOR] - new_flavor = new_profile.properties[self.FLAVOR] - cc = self.compute(obj) - oldflavor = self._validate_flavor(obj, old_flavor, 'update') - newflavor = self._validate_flavor(obj, new_flavor, 'update') - if oldflavor.id == newflavor.id: - return - - try: - cc.server_resize(obj.physical_id, newflavor.id) - cc.wait_for_server(obj.physical_id, 'VERIFY_RESIZE') - except exc.InternalError as ex: - msg = str(ex) - try: - cc.server_resize_revert(obj.physical_id) - cc.wait_for_server(obj.physical_id, 'ACTIVE') - except exc.InternalError as ex1: - msg = str(ex1) - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=msg) - - try: - cc.server_resize_confirm(obj.physical_id) - cc.wait_for_server(obj.physical_id, 'ACTIVE') - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - def _update_image(self, obj, new_profile, new_name, new_password): - """Update image used by server node. - - :param obj: The node object to operate on. - :param new_profile: The profile which may contain a new image name or - ID to use. - :param new_name: The name for the server node. - :param new_password: The new password for the administrative account - if provided. - :returns: A boolean indicating whether the image needs an update. - :raises: ``EResourceUpdate`` if the operation fails. - """ - old_image = self.properties[self.IMAGE] - new_image = new_profile.properties[self.IMAGE] - if not new_image: - msg = _("Updating Nova server with image set to None is not " - "supported by Nova") - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=msg) - # check the new image first - img_new = self._validate_image(obj, new_image, reason='update') - new_image_id = img_new.id - - driver = self.compute(obj) - if old_image: - img_old = self._validate_image(obj, old_image, reason='update') - old_image_id = img_old.id - else: - try: - server = driver.server_get(obj.physical_id) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - # Still, this 'old_image_id' could be empty, but it doesn't matter - # because the comparison below would fail if that is the case - old_image_id = server.image.get('id', None) - - if new_image_id == old_image_id: - return False - - try: - driver.server_rebuild(obj.physical_id, new_image_id, - new_name, new_password) - driver.wait_for_server(obj.physical_id, 'ACTIVE') - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - return True - - def _update_network_add_port(self, obj, networks): - """Create new interfaces for the server node. - - :param obj: The node object to operate on. - :param networks: A list containing information about new network - interfaces to be created. - :returns: ``None``. - :raises: ``EResourceUpdate`` if interaction with drivers failed.
- """ - cc = self.compute(obj) - try: - server = cc.server_get(obj.physical_id) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - ports = self._create_ports_from_properties( - obj, networks, 'update') - for port in ports: - params = {'port': port['id']} - try: - cc.server_interface_create(server, **params) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', - id=obj.physical_id, - message=str(ex)) - - def _find_port_by_net_spec(self, obj, net_spec, ports): - """Find existing ports match with specific network properties. - - :param obj: The node object. - :param net_spec: Network property of this profile. - :param ports: A list of ports which attached to this server. - :returns: A list of candidate ports matching this network spec. - """ - # TODO(anyone): handle security_groups - net = self._validate_network(obj, net_spec, 'update') - selected_ports = [] - for p in ports: - floating = p.get('floating', {}) - floating_network = net.get(self.FLOATING_NETWORK, None) - if floating_network and floating.get( - 'floating_network_id') != floating_network: - continue - floating_ip_address = net.get(self.FLOATING_IP, None) - if floating_ip_address and floating.get( - 'floating_ip_address') != floating_ip_address: - continue - # If network properties didn't contain floating ip, - # then we should better not make a port with floating ip - # as candidate. - if (floating and not floating_network and not floating_ip_address): - continue - port_id = net.get(self.PORT, None) - if port_id and p['id'] != port_id: - continue - fixed_ip = net.get(self.FIXED_IP, None) - if fixed_ip: - fixed_ips = [ff['ip_address'] for ff in p['fixed_ips']] - if fixed_ip not in fixed_ips: - continue - network = net.get(self.NETWORK, None) - if network: - net_id = self.network(obj).network_get(network).id - if p['network_id'] != net_id: - continue - selected_ports.append(p) - return selected_ports - - def _update_network_remove_port(self, obj, networks): - """Delete existing interfaces from the node. - - :param obj: The node object to operate. - :param networks: A list containing information about network - interfaces to be created. - :returns: ``None`` - :raises: ``EResourceUpdate`` - """ - cc = self.compute(obj) - nc = self.network(obj) - internal_ports = obj.data.get('internal_ports', []) - - for n in networks: - candidate_ports = self._find_port_by_net_spec( - obj, n, internal_ports) - port = candidate_ports[0] - try: - # Detach port from server - cc.server_interface_delete(port['id'], obj.physical_id) - # delete port if created by senlin - if port.get('remove', False): - nc.port_delete(port['id'], ignore_missing=True) - # delete floating IP if created by senlin - if (port.get('floating', None) and - port['floating'].get('remove', False)): - nc.floatingip_delete(port['floating']['id'], - ignore_missing=True) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - internal_ports.remove(port) - obj.data['internal_ports'] = internal_ports - node_obj.Node.update(self.context, obj.id, {'data': obj.data}) - - def _update_network(self, obj, new_profile): - """Updating server network interfaces. - - :param obj: The node object to operate. - :param new_profile: The new profile which may contain new network - settings. - :return: ``None`` - :raises: ``EResourceUpdate`` if there are driver failures. 
- """ - networks_current = self.properties[self.NETWORKS] - networks_create = new_profile.properties[self.NETWORKS] - networks_delete = copy.deepcopy(networks_current) - for network in networks_current: - if network in networks_create: - networks_create.remove(network) - networks_delete.remove(network) - - # Detach some existing interfaces - if networks_delete: - self._update_network_remove_port(obj, networks_delete) - - # Attach new interfaces - if networks_create: - self._update_network_add_port(obj, networks_create) - return - - def do_update(self, obj, new_profile=None, **params): - """Perform update on the server. - - :param obj: the server to operate on - :param new_profile: the new profile for the server. - :param params: a dictionary of optional parameters. - :returns: True if update was successful or False otherwise. - :raises: `EResourceUpdate` if operation fails. - """ - self.server_id = obj.physical_id - if not self.server_id: - return False - - if not new_profile: - return False - - if not self.validate_for_update(new_profile): - return False - - name_changed, new_name = self._check_server_name(obj, new_profile) - passwd_changed, new_passwd = self._check_password(obj, new_profile) - # Update server image: may have side effect of changing server name - # and/or admin password - image_changed = self._update_image(obj, new_profile, new_name, - new_passwd) - if not image_changed: - # we do this separately only when rebuild wasn't performed - if name_changed: - self._update_name(obj, new_name) - if passwd_changed: - self._update_password(obj, new_passwd) - - # Update server flavor: note that flavor is a required property - self._update_flavor(obj, new_profile) - self._update_network(obj, new_profile) - - # TODO(Yanyan Hu): Update block_device properties - # Update server metadata - self._update_metadata(obj, new_profile) - - return True - - def do_get_details(self, obj): - known_keys = { - 'OS-DCF:diskConfig', - 'OS-EXT-AZ:availability_zone', - 'OS-EXT-STS:power_state', - 'OS-EXT-STS:vm_state', - 'accessIPv4', - 'accessIPv6', - 'config_drive', - 'created', - 'hostId', - 'id', - 'key_name', - 'locked', - 'metadata', - 'name', - 'os-extended-volumes:volumes_attached', - 'progress', - 'status', - 'updated' - } - if obj.physical_id is None or obj.physical_id == '': - return {} - - driver = self.compute(obj) - try: - server = driver.server_get(obj.physical_id) - except exc.InternalError as ex: - return { - 'Error': { - 'code': ex.code, - 'message': str(ex) - } - } - - if server is None: - return {} - server_data = server.to_dict() - details = { - 'image': server_data['image']['id'], - 'flavor': server_data['flavor']['id'], - } - for key in known_keys: - if key in server_data: - details[key] = server_data[key] - - # process special keys like 'OS-EXT-STS:task_state': these keys have - # a default value '-' when not existing - special_keys = [ - 'OS-EXT-STS:task_state', - 'OS-SRV-USG:launched_at', - 'OS-SRV-USG:terminated_at', - ] - for key in special_keys: - if key in server_data: - val = server_data[key] - details[key] = val if val else '-' - - # process network addresses - details['addresses'] = copy.deepcopy(server_data['addresses']) - - # process security groups - sgroups = [] - if 'security_groups' in server_data: - for sg in server_data['security_groups']: - sgroups.append(sg['name']) - if len(sgroups) == 0: - details['security_groups'] = '' - elif len(sgroups) == 1: - details['security_groups'] = sgroups[0] - else: - details['security_groups'] = sgroups - - return dict((k, 
details[k]) for k in sorted(details)) - - def do_join(self, obj, cluster_id): - if not obj.physical_id: - return False - - driver = self.compute(obj) - metadata = driver.server_metadata_get(obj.physical_id) or {} - metadata['cluster_id'] = cluster_id - metadata['cluster_node_index'] = str(obj.index) - driver.server_metadata_update(obj.physical_id, metadata) - return super(ServerProfile, self).do_join(obj, cluster_id) - - def do_leave(self, obj): - if not obj.physical_id: - return False - - keys = ['cluster_id', 'cluster_node_index'] - self.compute(obj).server_metadata_delete(obj.physical_id, keys) - return super(ServerProfile, self).do_leave(obj) - - def do_check(self, obj): - if not obj.physical_id: - return False - - try: - server = self.compute(obj).server_get(obj.physical_id) - except exc.InternalError as ex: - raise exc.EResourceOperation(op='checking', type='server', - id=obj.physical_id, - message=str(ex)) - - if (server is None or server.status != 'ACTIVE'): - return False - - return True - - def do_recover(self, obj, **options): - """Handler for recover operation. - - :param obj: The node object. - :param dict options: A list for operations each of which has a name - and optionally a map from parameter to values. - """ - operation = options.get('operation', None) - - if operation and not isinstance(operation, str): - operation = operation[0] - - op_name = operation['name'] - if op_name.upper() != consts.RECOVER_RECREATE: - op_params = operation.get('params', {}) - if op_name.lower() not in self.OP_NAMES: - LOG.error("The operation '%s' is not supported", op_name) - return False - - method = getattr(self, "handle_" + op_name.lower()) - return method(obj, **op_params) - - return super(ServerProfile, self).do_recover(obj, **options) - - def handle_reboot(self, obj, **options): - """Handler for the reboot operation.""" - if not obj.physical_id: - return False - - reboot_type = options.get(self.REBOOT_TYPE, self.REBOOT_SOFT) - if (not isinstance(reboot_type, str) or - reboot_type not in self.REBOOT_TYPES): - return False - - self.compute(obj).server_reboot(obj.physical_id, reboot_type) - self.compute(obj).wait_for_server(obj.physical_id, 'ACTIVE') - return True - - def handle_rebuild(self, obj, **options): - if not obj.physical_id: - return False - - server_id = obj.physical_id - driver = self.compute(obj) - try: - server = driver.server_get(server_id) - except exc.InternalError as ex: - raise exc.EResourceOperation(op='rebuilding', type='server', - id=server_id, - message=str(ex)) - - if server is None or server.image is None: - return False - - image_id = server.image['id'] - admin_pass = self.properties.get(self.ADMIN_PASS) - try: - driver.server_rebuild(server_id, image_id, - self.properties.get(self.NAME), - admin_pass) - driver.wait_for_server(server_id, 'ACTIVE') - except exc.InternalError as ex: - raise exc.EResourceOperation(op='rebuilding', type='server', - id=server_id, - message=str(ex)) - return True - - def handle_change_password(self, obj, **options): - """Handler for the change_password operation.""" - if not obj.physical_id: - return False - - password = options.get(self.ADMIN_PASSWORD, None) - if (password is None or not isinstance(password, str)): - return False - - self.compute(obj).server_change_password(obj.physical_id, password) - return True diff --git a/devstack/README.rst b/devstack/README.rst deleted file mode 100644 index 1026aeb3e..000000000 --- a/devstack/README.rst +++ /dev/null @@ -1,21 +0,0 @@ -=========================== -Enabling senlin in DevStack 
-=========================== - -1. Download DevStack:: - - $ git clone https://git.openstack.org/openstack-dev/devstack - $ cd devstack - -2. Add following repo as external repositories into your ``local.conf`` file:: - - [[local|localrc]] - #Enable senlin - enable_plugin senlin https://git.openstack.org/openstack/senlin - #Enable senlin-dashboard - enable_plugin senlin-dashboard https://git.openstack.org/openstack/senlin-dashboard - -Optionally, you can add a line ``SENLIN_USE_MOD_WSGI=True`` to the same ``local.conf`` -file if you prefer running the Senlin API service under Apache. - -3. Run ``./stack.sh``. diff --git a/devstack/files/apache-senlin-api.template b/devstack/files/apache-senlin-api.template deleted file mode 100644 index 89b266925..000000000 --- a/devstack/files/apache-senlin-api.template +++ /dev/null @@ -1,28 +0,0 @@ - - Require all granted - - - - WSGIDaemonProcess senlin-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup senlin-api - WSGIScriptAlias / %SENLIN_BIN_DIR%/senlin-wsgi-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - AllowEncodedSlashes on - = 2.4> - ErrorLogFormat "%M" - - ErrorLog /var/log/%APACHE_NAME%/senlin-api.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - -Alias /cluster %SENLIN_BIN_DIR%/senlin-wsgi-api - - SetHandler wsgi-script - Options +ExecCGI - WSGIProcessGroup senlin-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - diff --git a/devstack/lib/senlin b/devstack/lib/senlin deleted file mode 100644 index 97ccfbd6e..000000000 --- a/devstack/lib/senlin +++ /dev/null @@ -1,329 +0,0 @@ -#!/bin/bash -# -# lib/senlin -# Install and start **Senlin** service - -# To enable, add the following to local.conf -# -# [[local|localrc]] -# enable_plugin senlin https://git.openstack.org/openstack/senlin - -# Dependencies: -# -# - functions -# - HORIZON_DIR - -# stack.sh -# --------- -# - config_senlin_dashboard -# - configure_senlin -# - cleanup_senlin -# - cleanup_senlin_dashboard -# - create_senlin_cache_dir -# - create_senlin_accounts -# - init_senlin -# - install_senlinclient -# - install_senlin -# - install_senlin_dashboard -# - is_senlin_enabled -# - start_senlin -# - stop_senlin - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# set up default -SENLIN_AUTH_CACHE_DIR=${SENLIN_AUTH_CACHE_DIR:-/var/cache/senlin} -SENLIN_CONF_DIR=/etc/senlin -SENLIN_CONF=$SENLIN_CONF_DIR/senlin.conf -SENLIN_API_HOST=${SENLIN_API_HOST:-$SERVICE_HOST} -SENLIN_WSGI_MODE=${SENLIN_WSGI_MODE:-"uwsgi"} - -SENLIN_DIR=$DEST/senlin -if [[ ${USE_VENV} = True ]]; then - PROJECT_VENV["senlin"]=${SENLIN_DIR}.venv - SENLIN_BIN_DIR=${PROJECT_VENV["senlin"]}/bin -else - SENLIN_BIN_DIR=$(get_python_exec_prefix) -fi -SENLIN_REPO=${SENLIN_REPO:-${GIT_BASE}/openstack/senlin.git} -SENLIN_BRANCH=${SENLIN_BRANCH:-master} - -SENLINCLIENT_DIR=$DEST/python-senlinclient -SENLINCLIENT_REPO=${SENLINCLIENT_REPO:-${GIT_BASE}/openstack/python-senlinclient.git} -SENLINCLIENT_BRANCH=${SENLINCLIENT_BRANCH:-master} - -SENLIN_DASHBOARD_DIR=$DEST/senlin-dashboard -SENLIN_DASHBOARD_REPO=${SENLIN_DASHBOARD_REPO:-${GIT_BASE}/openstack/senlin-dashboard.git} -SENLIN_DASHBOARD_BRANCH=${SENLIN_DASHBOARD_BRANCH:-master} - -SENLIN_UWSGI=$SENLIN_BIN_DIR/senlin-wsgi-api -SENLIN_UWSGI_CONF=$SENLIN_CONF_DIR/senlin-api-uwsgi.ini - -if is_service_enabled tls-proxy; then - SENLIN_SERVICE_PROTOCOL="https" -fi - -SENLIN_SERVICE_PROTOCOL=${SENLIN_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -# 
Functions -# --------- - -# Test if any Senlin services are enabled -function is_senlin_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"sl-" ]] && return 0 - return 1 -} - -# cleanup_senlin() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_senlin { - sudo rm -f $(apache_site_config_for senlin-api) - remove_uwsgi_config "$SENLIN_UWSGI_CONF" "$SENLIN_UWSGI" - sudo rm -rf $SENLIN_AUTH_CACHE_DIR - sudo rm -rf $SENLIN_CONF_DIR -} - -# configure_senlin() - Set config files, create data dirs, etc -function configure_senlin { - if [[ ! -d $SENLIN_CONF_DIR ]]; then - sudo mkdir -p $SENLIN_CONF_DIR - fi - - sudo chown $STACK_USER $SENLIN_CONF_DIR - - sudo install -d -o $STACK_USER $SENLIN_CONF_DIR - - SENLIN_API_PASTE_FILE=$SENLIN_CONF_DIR/api-paste.ini - - cp $SENLIN_DIR/etc/senlin/api-paste.ini $SENLIN_API_PASTE_FILE - - # common options - iniset $SENLIN_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - iniset $SENLIN_CONF DEFAULT auth_encryption_key $(generate_hex_string 16) - iniset $SENLIN_CONF DEFAULT default_region_name "$REGION_NAME" - - - if [ "$USE_SYSTEMD" != "False" ]; then - setup_systemd_logging $SENLIN_CONF - fi - - if [ "$LOG_COLOR" == "True" ] && [ "$USE_SYSTEMD" == "False" ]; then - # Add color to logging output - setup_colorized_logging $SENLIN_CONF DEFAULT - fi - - # rpc - iniset_rpc_backend senlin $SENLIN_CONF - - # Database connection - iniset $SENLIN_CONF database connection `database_connection_url senlin` - - # Keystone authtoken middleware - #configure_auth_token_middleware $SENLIN_CONF senlin $SENLIN_AUTH_CACHE_DIR - iniset $SENLIN_CONF keystone_authtoken cafile $SSL_BUNDLE_FILE - iniset $SENLIN_CONF keystone_authtoken auth_url $KEYSTONE_AUTH_URI - iniset $SENLIN_CONF keystone_authtoken username senlin - iniset $SENLIN_CONF keystone_authtoken password $SERVICE_PASSWORD - iniset $SENLIN_CONF keystone_authtoken project_name $SERVICE_TENANT_NAME - iniset $SENLIN_CONF keystone_authtoken project_domain_name Default - iniset $SENLIN_CONF keystone_authtoken user_domain_name Default - iniset $SENLIN_CONF keystone_authtoken auth_type password - iniset $SENLIN_CONF keystone_authtoken service_token_roles_required True - iniset $SENLIN_CONF keystone_authtoken interface public - - # Senlin service credentials - iniset $SENLIN_CONF authentication auth_url $KEYSTONE_AUTH_URI/v3 - iniset $SENLIN_CONF authentication service_username senlin - iniset $SENLIN_CONF authentication service_password $SERVICE_PASSWORD - iniset $SENLIN_CONF authentication service_project_name $SERVICE_TENANT_NAME - - # Senlin Conductor options - iniset $SENLIN_CONF conductor workers $API_WORKERS - - # Senlin Conductor options - iniset $SENLIN_CONF engine workers $API_WORKERS - - # Senlin Health-Manager options - iniset $SENLIN_CONF health_manager workers $API_WORKERS - - # Zaqar options for message receiver - iniset $SENLIN_CONF zaqar auth_type password - iniset $SENLIN_CONF zaqar username zaqar - iniset $SENLIN_CONF zaqar password $SERVICE_PASSWORD - iniset $SENLIN_CONF zaqar project_name $SERVICE_TENANT_NAME - iniset $SENLIN_CONF zaqar auth_url $KEYSTONE_AUTH_URI/v3 - iniset $SENLIN_CONF zaqar user_domain_name Default - iniset $SENLIN_CONF zaqar project_domain_name Default - - if [[ "$SENLIN_WSGI_MODE" == "uwsgi" ]]; then - write_uwsgi_config "$SENLIN_UWSGI_CONF" "$SENLIN_UWSGI" "/cluster" - else - _config_senlin_apache_wsgi - fi -} - -# _config_senlin_apache_wsgi() - Configure mod_wsgi -function _config_senlin_apache_wsgi { - local 
senlin_api_apache_conf - local venv_path="" - local senlin_bin_dir="" - senlin_bin_dir=$(get_python_exec_prefix) - senlin_api_apache_conf=$(apache_site_config_for senlin-api) - - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["senlin"]}/lib/$(python_version)/site-packages" - senlin_bin_dir=${PROJECT_VENV["senlin"]}/bin - fi - - sudo cp $SENLIN_DIR/devstack/files/apache-senlin-api.template $senlin_api_apache_conf - sudo sed -e " - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%SENLIN_BIN_DIR%|$SENLIN_BIN_DIR|g; - s|%SSLENGINE%|$senlin_ssl|g; - s|%SSLCERTFILE%|$senlin_certfile|g; - s|%SSLKEYFILE%|$senlin_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g; - s|%APIWORKERS%|$API_WORKERS|g; - " -i $senlin_api_apache_conf -} - -# init_senlin() - Initialize database -function init_senlin { - # (re)create senlin database - recreate_database senlin utf8 - - if [[ "$USE_SQLALCHEMY_LATEST" == "True" ]]; then - pip3 install --upgrade alembic sqlalchemy - fi - - $SENLIN_BIN_DIR/senlin-manage db_sync - create_senlin_cache_dir -} - -# create_senlin_cache_dir() - Part of the init_senlin() process -function create_senlin_cache_dir { - # Create cache dirs - sudo mkdir -p $SENLIN_AUTH_CACHE_DIR - sudo install -d -o $STACK_USER $SENLIN_AUTH_CACHE_DIR -} - -# install_senlinclient() - Collect source and prepare -function install_senlinclient { - if use_library_from_git "python-senlinclient"; then - git_clone $SENLINCLIENT_REPO $SENLINCLIENT_DIR $SENLINCLIENT_BRANCH - setup_develop $SENLINCLIENT_DIR - else - pip_install --upgrade python-senlinclient - fi -} - -# install_senlin_dashboard() - Collect source and prepare -function install_senlin_dashboard { - # NOTE(Liuqing): workaround for devstack bug: 1540328 - # https://bugs.launchpad.net/devstack/+bug/1540328 - # where devstack install 'test-requirements' but should not do it - # for senlin-dashboard project as it installs Horizon from url. - # Remove following two 'mv' commands when mentioned bug is fixed. 
- if use_library_from_git "senlin-dashboard"; then - git_clone $SENLIN_DASHBOARD_REPO $SENLIN_DASHBOARD_DIR $SENLIN_DASHBOARD_BRANCH - mv $SENLIN_DASHBOARD_DIR/test-requirements.txt $SENLIN_DASHBOARD_DIR/_test-requirements.txt - setup_develop $SENLIN_DASHBOARD_DIR - mv $SENLIN_DASHBOARD_DIR/_test-requirements.txt $SENLIN_DASHBOARD_DIR/test-requirements.txt - else - pip_install --upgrade senlin-dashboard - fi -} - -# configure_senlin_dashboard() - Set config files -function config_senlin_dashboard { - # Install Senlin Dashboard as plugin for Horizon - ln -sf $SENLIN_DASHBOARD_DIR/senlin_dashboard/enabled/_50_senlin.py $HORIZON_DIR/openstack_dashboard/local/enabled/_50_senlin.py - # Enable senlin policy - ln -sf $SENLIN_DASHBOARD_DIR/senlin_dashboard/conf/senlin_policy.json $HORIZON_DIR/openstack_dashboard/conf/senlin_policy.json -} - -# cleanup_senlin_dashboard() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_senlin_dashboard { - sudo rm -rf $HORIZON_DIR/openstack_dashboard/local/enabled/_50_senlin.py - sudo rm -rf $HORIZON_DIR/openstack_dashboard/conf/senlin_policy.json -} - -# install_senlin() - Collect source and prepare -function install_senlin { - if [[ "$SENLIN_WSGI_MODE" == "uwsgi" ]]; then - install_apache_uwsgi - else - install_apache_wsgi - fi - - git_clone $SENLIN_REPO $SENLIN_DIR $SENLIN_BRANCH - setup_develop $SENLIN_DIR -} - -# start_senlin() - Start running processes, including screen -function start_senlin { - run_process sl-eng "$SENLIN_BIN_DIR/senlin-engine --config-file=$SENLIN_CONF" - run_process sl-conductor "$SENLIN_BIN_DIR/senlin-conductor --config-file=$SENLIN_CONF" - run_process sl-health-manager "$SENLIN_BIN_DIR/senlin-health-manager --config-file=$SENLIN_CONF" - - if [[ "$SENLIN_WSGI_MODE" == "uwsgi" ]]; then - run_process sl-api "$(which uwsgi) --procname-prefix senlin-api --ini $SENLIN_UWSGI_CONF" - else - enable_apache_site senlin-api - restart_apache_server - tail_log senlin-api /var/log/$APACHE_NAME/senlin-api.log - fi - - echo "Waiting for senlin-api to start..." - if ! 
wait_for_service $SERVICE_TIMEOUT $SENLIN_SERVICE_PROTOCOL://$SENLIN_API_HOST/cluster; then - die $LINENO "senlin-api did not start" - fi -} - -# stop_senlin() - Stop running processes -function stop_senlin { - # Kill the screen windows - stop_process sl-eng - stop_process sl-conductor - stop_process sl-health-manager - - if [[ "$SENLIN_WSGI_MODE" == "uwsgi" ]]; then - stop_process sl-api - else - disable_apache_site senlin-api - restart_apache_server - fi -} - -# create_senlin_accounts() - Set up common required senlin accounts -function create_senlin_accounts { - create_service_user "senlin" - - local senlin_api_url="$SENLIN_SERVICE_PROTOCOL://$SENLIN_API_HOST/cluster" - - get_or_create_service "senlin" "clustering" "Senlin Clustering Service" - get_or_create_endpoint "clustering" \ - "$REGION_NAME" \ - "$senlin_api_url" \ - "$senlin_api_url" \ - "$senlin_api_url" - - # get or add 'service' role to 'senlin' on 'demo' project - get_or_add_user_project_role "service" "senlin" "demo" -} - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/devstack/plugin.sh b/devstack/plugin.sh deleted file mode 100644 index d5b9bad91..000000000 --- a/devstack/plugin.sh +++ /dev/null @@ -1,58 +0,0 @@ -# senlin.sh - Devstack extras script to install senlin - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set -o xtrace - -echo_summary "senlin's plugin.sh was called..." -. $DEST/senlin/devstack/lib/senlin -(set -o posix; set) - -if is_service_enabled sl-api sl-eng; then - if [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing senlin" - install_senlin - echo_summary "Installing senlinclient" - install_senlinclient - if is_service_enabled horizon; then - echo_summary "Installing senlin dashboard" - install_senlin_dashboard - fi - cleanup_senlin - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring senlin" - configure_senlin - - if is_service_enabled horizon; then - echo_summary "Configuring senlin dashboard" - config_senlin_dashboard - fi - - if is_service_enabled key; then - create_senlin_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize senlin - init_senlin - - # Start the senlin API and senlin taskmgr components - echo_summary "Starting senlin" - start_senlin - fi - - if [[ "$1" == "unstack" ]]; then - stop_senlin - fi - - if [[ "$1" == "clean" ]]; then - cleanup_senlin - - if is_service_enabled horizon; then - cleanup_senlin_dashboard - fi - fi -fi - -# Restore xtrace -$XTRACE diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index b97367d4d..000000000 --- a/devstack/settings +++ /dev/null @@ -1,6 +0,0 @@ -# Devstack settings - -# We have to add Senlin to enabled services for screen_it to work -# It consists of 2 parts: sl-api (API), sl-eng (Engine). - -enable_service sl-api sl-eng sl-conductor sl-health-manager diff --git a/doc/.gitignore b/doc/.gitignore deleted file mode 100644 index 6438f1c05..000000000 --- a/doc/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -target/ -build/ diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index 32b2cee35..000000000 --- a/doc/Makefile +++ /dev/null @@ -1,159 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = build - -# Internal variables. 
-PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " xml to make Docutils-native XML files" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Heat.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Heat.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/Heat" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Heat" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The xml files are in $(BUILDDIR)/xml." 
- -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." diff --git a/doc/README.rst b/doc/README.rst deleted file mode 100644 index fa627d005..000000000 --- a/doc/README.rst +++ /dev/null @@ -1,55 +0,0 @@ -=========================== -Building the developer docs -=========================== - -Dependencies -============ - -You'll need to install python *Sphinx* package and *oslosphinx* -package: - -:: - - sudo pip install sphinx oslosphinx - -If you are using the virtualenv you'll need to install them in the -virtualenv. - -Get Help -======== - -Just type make to get help: - -:: - - make - -It will list available build targets. - -Build Doc -========= - -To build the man pages: - -:: - - make man - -To build the developer documentation as HTML: - -:: - - make html - -Type *make* for more formats. - -Test Doc -======== - -If you modify doc files, you can type: - -:: - - make doctest - -to check whether the format has problem. diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 7bd856e2b..000000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
-# this is required for the docs build jobs -openstackdocstheme>=2.2.1 # Apache-2.0 -os-api-ref>=1.4.0 # Apache-2.0 -sphinx>=2.0.0,!=2.1.0 # BSD -reno>=3.1.0 # Apache-2.0 diff --git a/doc/source/admin/authentication.rst b/doc/source/admin/authentication.rst deleted file mode 100644 index 908ff109e..000000000 --- a/doc/source/admin/authentication.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -============== -Authentication -============== - -(TBD) - -This document describes the authentication model used by Senlin. diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst deleted file mode 100644 index 5aa1b21b7..000000000 --- a/doc/source/admin/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -==================== -Administering Senlin -==================== - -.. toctree:: - :maxdepth: 1 - - authentication diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 64b63374b..000000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,109 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - - -sys.path.insert(0, os.path.abspath('../..')) - -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) - -sys.path.insert(0, ROOT) -sys.path.insert(0, BASE_DIR) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.todo', - 'sphinx.ext.graphviz', - 'sphinx.ext.intersphinx', - 'openstackdocstheme', - 'oslo_config.sphinxext', - 'oslo_policy.sphinxext', - 'oslo_policy.sphinxpolicygen', - 'ext.resources' -] - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/senlin' -openstackdocs_bug_project = 'senlin' -openstackdocs_bug_tag = '' - -policy_generator_config_file = ( - '../../tools/policy-generator.conf' -) -sample_policy_basename = '_static/senlin' - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'senlin' -copyright = '2015, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# -- Options for HTML output -------------------------------------------------- - -# html_static_path = ['static'] - -# The theme to use for HTML and HTML Help pages. See the documentation for a -# list of builtin themes. -html_theme = 'openstackdocs' - -# Add any paths that contain custom themes here, relative to this directory -# html_theme_path = [] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - '%s Documentation' % project, - 'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. -# intersphinx_mapping = {'http://docs.python.org/': None} - -suppress_warnings = ['ref.option'] - -[extensions] -# todo_include_todos = True diff --git a/doc/source/configuration/config.rst b/doc/source/configuration/config.rst deleted file mode 100644 index 3ecfafc3e..000000000 --- a/doc/source/configuration/config.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -===================== -Configuration Options -===================== - -Senlin uses `oslo.config` to define and manage configuration options to -allow the deployer to control many aspects of the service API and the service -engine. - -.. show-options:: senlin.conf - -Options -======= - -.. currentmodule:: senlin.conf.opts - -.. 
autofunction:: list_opts \ No newline at end of file diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst deleted file mode 100644 index 1debb5324..000000000 --- a/doc/source/configuration/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -==================== -Senlin Configuration -==================== - -.. toctree:: - :maxdepth: 2 - - config - policy - sample-policy-yaml diff --git a/doc/source/configuration/policy.rst b/doc/source/configuration/policy.rst deleted file mode 100644 index c4d4a7bfe..000000000 --- a/doc/source/configuration/policy.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -======================================= -Senlin Sample Policy Configuration File -======================================= - -.. warning:: - - JSON formatted policy file is deprecated since Senlin 11.0.0 (Wallaby). - This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing - JSON-formatted policy file to YAML in a backward-compatible way. - -.. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html - -The following is an overview of all available access policies in Senlin. -For a sample configuration file, refer to :doc:`sample-policy-yaml`. - -.. show-policy:: - :config-file: ../../tools/policy-generator.conf diff --git a/doc/source/configuration/sample-policy-yaml.rst b/doc/source/configuration/sample-policy-yaml.rst deleted file mode 100644 index e397b81f7..000000000 --- a/doc/source/configuration/sample-policy-yaml.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -=========== -policy.yaml -=========== - -.. warning:: - - JSON formatted policy file is deprecated since Senlin 11.0.0 (Wallaby). - This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing - JSON-formatted policy file to YAML in a backward-compatible way. - -.. 
__: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html - -Use the ``policy.yaml`` file to define additional access controls that will be -applied to Senlin: - -.. literalinclude:: ../_static/senlin.policy.yaml.sample diff --git a/doc/source/contributor/action.rst b/doc/source/contributor/action.rst deleted file mode 100644 index ba87d1bd4..000000000 --- a/doc/source/contributor/action.rst +++ /dev/null @@ -1,317 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -======= -Actions -======= - -An action is an abstraction of some logic that can be executed by a worker -thread. Most of the operations supported by Senlin are executed asynchronously, -which means they are queued into database and then picked up by certain worker -thread for execution. - -Currently, Senlin only supports builtin actions listed below. In future, we -may evolve to support user-defined actions (UDAs). A user-defined action may -carry a Shell script to be executed on a target Nova server, or a Heat -SoftwareConfig to be deployed on a stack, for example. The following builtin -actions are supported at the time of this design: - -- ``CLUSTER_CREATE``: An action for creating a cluster; -- ``CLUSTER_DELETE``: An action for deleting a cluster; -- ``CLUSTER_UPDATE``: An action for updating a cluster; -- ``CLUSTER_ADD_NODES``: An action for adding existing nodes to a cluster; -- ``CLUSTER_DEL_NODES``: An action for removing nodes from a cluster; -- ``CLUSTER_REPLACE_NODES``: An action for replacing nodes in a cluster; -- ``CLUSTER_RESIZE``: An action for adjusting the size of a cluster; -- ``CLUSTER_SCALE_IN``: An action to shrink the size of a cluster by removing - nodes from the cluster; -- ``CLUSTER_SCALE_OUT``: An action to extend the size of a cluster by creating - new nodes using the ``profile_id`` of the cluster; -- ``CLUSTER_ATTACH_POLICY``: An action to attach a policy to a cluster; -- ``CLUSTER_DETACH_POLICY``: An action to detach a policy from a cluster; -- ``CLUSTER_UPDATE_POLICY``: An action to update the properties of a binding - between a cluster and a policy; -- ``CLUSTER_CHECK``: An action for checking a cluster and execute ``NODE_CHECK`` - for all its nodes; -- ``CLUSTER_RECOVER``: An action for recovering a cluster and execute - ``NODE_RECOVER`` for all the nodes in 'ERROR' status; -- ``NODE_CREATE``: An action for creating a new node; -- ``NODE_DELETE``: An action for deleting an existing node; -- ``NODE_UPDATE``: An action for updating the properties of an existing node; -- ``NODE_JOIN``: An action for joining a node to an existing cluster; -- ``NODE_LEAVE``: An action for a node to leave its current owning cluster; -- ``NODE_CHECK``: An action for checking a node to see if its physical node is - 'ACTIVE' and update its status with 'ERROR' if not; -- ``NODE_RECOVER``: An action for recovering a node; - - -Action Properties -~~~~~~~~~~~~~~~~~ - -An action has the following properties when created: - -- ``id``: a globally unique ID for the action object; -- ``name``: a string 
representation of the action name which might be - generated automatically for actions derived from other operations; -- ``context``: a dictionary that contains the calling context that will be - used by the engine when executing the action. Contents in this dictionary - may contain sensitive information such as user credentials. -- ``action``: a text property that contains the action body to be executed. - Currently, this property only contains the name of a builtin action. In - future, we will provide a structured definition of action for UDAs. -- ``target``: the UUID of an object (e.g. a cluster, a node or a policy) to - be operated; -- ``cause``: a string indicating the reason why this action was created. The - purpose of this property is for the engine to check whether a new lock should - be acquired before operating an object. Valid values for this property - include: - - * ``RPC Request``: this indicates that the action was created upon receiving - a RPC request from Senlin API, which means a lock is likely needed; - * ``Derived Action``: this indicates that the action was created internally - as part of the execution path of another action, which means a lock might - have been acquired; - -- ``owner``: the UUID of a worker thread that currently "owns" this action and - is responsible for executing it. -- ``interval``: the interval (in seconds) for repetitive actions, a value of 0 - means that the action won't be repeated; -- ``start_time``: timestamp when the action was last started. This field is - provided for action execution timeout detection; -- ``stop_time``: timestamp when the action was stopped. This field is provided - for measuring the execution time of an action; -- ``timeout``: timeout (in seconds) for the action execution. A value of 0 - means that the action does not have a customized timeout constraint, though - it may still have to honor the system wide ``default_action_timeout`` - setting. -- ``status``: a string representation of the current status of the action. See - subsection below for detailed status definitions. -- ``status_reason``: a string describing the reason that has led the action to - its current status. -- ``control``: a string for holding the pending signals such as ``CANCEL``, - ``SUSPEND`` or ``RESUME``. -- ``inputs``: a dictionary that provides inputs to the action when executed; -- ``outputs``: a dictionary that captures the outputs (including error - messages) from the action execution; -- ``depends_on``: a UUID list for the actions that must be successfully - completed before the current action becomes ``READY``. An action cannot - become ``READY`` when this property is not an empty string. -- ``depended_by``: a UUID list for the actions that depends on the successful - completion of current action. When the current action is completed with a - success, the actions listed in this property will get notified. -- ``created_at``: the timestamp when the action was created; -- ``updated_at``: the timestamp when the action was last updated; - -*TODO*: Add support for scheduled action execution. - -*NOTE*: The default value of the ``default_action_timeout`` is 3600 seconds. - - -The Action Data Property ------------------------- - -An action object has a property named ``data`` which is used for saving policy -decisions. This property is a Python dict for different policies to save and -exchange policy decision data. - -Suppose we have a scaling policy, a deletion policy and a load-balancing -policy attached to the same cluster. 
By design, when a ``CLUSTER_SCALE_IN``
-action is picked up for execution, the following sequence will happen:
-
-1) When the action is about to be executed, the worker thread checks all
-   policies that have registered a "pre_op" on this action type.
-2) Based on the built-in priority setting, the "pre_op" of the scaling policy
-   is invoked, and the policy determines the number of nodes to be deleted.
-   This decision is saved to the action's ``data`` property in the following
-   format:
-
-::
-
-  "deletion": {
-    "count": 2
-  }
-
-3) Based on the built-in priority setting, the deletion policy is evaluated
-   next. When the "pre_op" method of the deletion policy is invoked, it first
-   checks the ``data`` property of the action where it finds out the number of
-   nodes to delete. Then it will calculate the list of candidates to be
-   deleted using its selection criteria (e.g. ``OLDEST_FIRST``). Finally, it
-   saves the list of candidate nodes to be deleted to the ``data`` property of
-   the action, in the following format:
-
-::
-
-  "deletion": {
-    "count": 2,
-    "candidates": ["1234-4567-9900", "3232-5656-1111"]
-  }
-
-4) According to the built-in priority setting, the load-balancing policy is
-   evaluated last. When invoked, its "pre_op" method checks the ``data``
-   property of the action and finds out the candidate nodes to be removed from
-   the cluster. With this information, the method removes the nodes from the
-   load-balancer maintained by the policy.
-
-5) The action's ``execute()`` method is now invoked and it removes the nodes
-   as given in its ``data`` property, updates the cluster's last update
-   timestamp, then returns.
-
-From the example above, we can see that the ``data`` property of an action
-plays a critical role in policy checking and enforcement. To avoid losing
-the in-memory ``data`` content during a service restart, Senlin persists the
-content to the database whenever it is changed.
-
-Note that there are policies that will write to the ``data`` property of a
-node for a similar reason. For example, a placement policy may decide where a
-new node should be created. This information is saved into the ``data``
-property of a node. When a profile is about to create a node, it is supposed
-to check this property and enforce it. For a Nova server profile, this means
-that the profile code will inject ``scheduler_hints`` to the server instance
-before it is created.
-
-
-Action Statuses
-~~~~~~~~~~~~~~~
-
-An action can be in one of the following statuses during its lifetime:
-
-- ``INIT``: Action object is being initialized, not ready for execution;
-- ``READY``: Action object can be picked up by any worker thread for
-  execution;
-- ``WAITING``: Action object has dependencies on other actions; it may
-  become ``READY`` only when the dependents are all completed with successes;
-- ``WAITING_LIFECYCLE_COMPLETION``: Action object is a node deletion that is
-  awaiting lifecycle completion. It will become ``READY`` when the complete
-  lifecycle API is called or the lifecycle hook timeout in the deletion
-  policy is reached.
-- ``RUNNING``: Action object is being executed by a worker thread;
-- ``SUSPENDED``: Action object is suspended during execution, so the only way
-  to put it back to ``RUNNING`` status is to send it a ``RESUME`` signal;
-- ``SUCCEEDED``: Action object has completed execution with a success;
-- ``FAILED``: Action object execution has been aborted due to failures;
-- ``CANCELLED``: Action object execution has been aborted due to a ``CANCEL``
-  signal.
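-
-A client typically only needs to know whether an action has reached one of
-the terminal statuses above. A minimal illustrative sketch (the ``get_action``
-callable is a hypothetical helper standing in for an API call, not part of
-Senlin):
-
-.. code-block:: python
-
-    import time
-
-    COMPLETED = ('SUCCEEDED', 'FAILED', 'CANCELLED')
-
-    def wait_for_action(get_action, action_id, interval=2):
-        """Poll a hypothetical get_action(action_id) until completion."""
-        while True:
-            action = get_action(action_id)
-            if action['status'] in COMPLETED:
-                return action
-            time.sleep(interval)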
-
-Collectively, the ``SUCCEEDED``, ``FAILED`` and ``CANCELLED`` statuses are all
-valid action completion statuses.
-
-
-The ``execute()`` Method and Return Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Each subclass of the base ``Action`` must provide an implementation of the
-``execute()`` method which provides the actual logic to be invoked by the
-generic action execution framework.
-
-Senlin defines a protocol for the execution of actions. The ``execute()``
-method should always return a tuple ``<res>, <reason>`` where the ``<res>``
-indicates whether the action procedure execution was successful and the
-``<reason>`` provides an explanation of the result, e.g. the error message
-when the execution has failed. In this protocol, the action procedure can
-return one of the following values:
-
-- ``OK``: the action execution was a complete success;
-- ``ERROR``: the action execution has failed with error messages;
-- ``RETRY``: the action execution has encountered some resource competition
-  situation, so the recommendation is to re-start the action if possible;
-- ``CANCEL``: the action has received a ``CANCEL`` signal and thus has aborted
-  its execution;
-- ``TIMEOUT``: the action has detected a timeout error when performing some
-  time-consuming jobs.
-
-When the return value is ``OK``, the action status will be set to
-``SUCCEEDED``; when the return value is ``ERROR`` or ``TIMEOUT``, the action
-status will be set to ``FAILED``; when the return value is ``CANCEL``, the
-action status will be set to ``CANCELLED``; finally, when the return value is
-``RETRY``, the action status is reset to ``READY``, and the current worker
-thread will release its lock on the action so that other threads can pick it
-up when resources permit.
-
-
-Creating An Action
-~~~~~~~~~~~~~~~~~~
-
-Currently, Senlin actions are mostly generated from within the Senlin engine,
-either due to an RPC request, or due to another action's execution.
-
-In future, Senlin plans to support user-defined actions (UDAs). The Senlin API
-will then provide calls for creating a UDA and for invoking an action, which
-can be a UDA.
-
-
-Listing Actions
-~~~~~~~~~~~~~~~
-
-Senlin provides an ``action_list`` API for users to query the action objects
-in the Senlin database. Such a query request can be accompanied with the
-following query parameters in the query string:
-
-- ``filters``: a map that will be used for filtering out records that fail to
-  match the criteria. The recognizable keys in the map include:
-
-  * ``name``: the name of the actions where the value can be a string or a
-    list of strings;
-  * ``target``: the UUID of the object targeted by the action where the value
-    can be a string or a list of strings;
-  * ``action``: the builtin action for matching where the value can be a
-    string or a list of strings;
-
-- ``limit``: a number that restricts the maximum number of action records to
-  be returned from the query. It is useful for displaying the records in
-  pages where the page size can be specified as the limit.
-- ``marker``: A string that represents the last seen UUID of actions in
-  previous queries. This query will only return results appearing after the
-  specified UUID. This is useful for displaying records in pages.
-- ``sort``: A string to enforce sorting of the results. It accepts a list of
-  known property names of an action as sorting keys separated by commas. Each
-  sorting key can optionally have either ``:asc`` or ``:desc`` appended to the
-  key for controlling the sorting direction.
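-
-As an illustration, these query parameters can be supplied directly from
-client code. A minimal sketch using the ``openstacksdk`` clustering proxy
-(assuming an authenticated connection; the cloud name is a placeholder and
-the keyword arguments are assumed to map onto the query parameters described
-above):
-
-.. code-block:: python
-
-    import openstack
-
-    conn = openstack.connect(cloud='mycloud')  # placeholder cloud name
-
-    # List at most 10 actions, newest first.
-    for action in conn.clustering.actions(limit=10, sort='created_at:desc'):
-        print(action.id, action.name, action.status)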
-
-
-Getting An Action
-~~~~~~~~~~~~~~~~~
-
-Senlin provides the ``action_show`` API call for software or a user to
-retrieve a specific action and examine its details. When such a query
-arrives at the Senlin engine, the engine will search the database for the
-``action_id`` specified.
-
-A user can provide the UUID, the name or the short ID of an action as the
-``action_id`` for the query. The Senlin engine will try each of them in
-sequence. When more than one action matches the criteria, an error message is
-returned to the user; otherwise, the details of the action object are
-returned.
-
-
-Signaling An Action
-~~~~~~~~~~~~~~~~~~~
-
-When an action is in ``RUNNING`` status, a user can send signals to it. A
-signal is actually a word that will be written into the ``control`` field of
-the ``action`` table in the database.
-
-When an action is capable of handling signals, it is supposed to check its
-``control`` field in the DB table regularly and abort execution in a graceful
-way. An action has the freedom to check or ignore these signals. In other
-words, Senlin cannot guarantee that a signal will have an effect on any
-action.
-
-The currently supported signal words are:
-
-- ``CANCEL``: this word indicates that the target action should cancel its
-  execution and return when possible;
-- ``SUSPEND``: this word indicates that the target action should suspend its
-  execution when possible. The action doesn't have to return. As an
-  alternative, it can sleep waiting on a ``RESUME`` signal to continue its
-  work;
-- ``RESUME``: this word indicates that the target action, if suspended, should
-  resume its execution.
-
-Support for the ``SUSPEND`` and ``RESUME`` signals is still under development.
diff --git a/doc/source/contributor/api_microversion.rst b/doc/source/contributor/api_microversion.rst
deleted file mode 100644
index 59262a342..000000000
--- a/doc/source/contributor/api_microversion.rst
+++ /dev/null
@@ -1,374 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-===================
-API Microversioning
-===================
-
-Background
-~~~~~~~~~~
-
-*API Microversioning* is a framework in Senlin to enable smooth evolution
-of the Senlin REST API while preserving its backward compatibility. The basic
-idea is that a user has to explicitly specify the particular version of the
-API requested in the request. Disruptive changes to the API can then be added
-without breaking existing users who don't specifically ask for them. This is
-done with an HTTP header ``OpenStack-API-Version`` as suggested by the
-OpenStack API Working Group. The value of the header should contain the
-service name (``clustering``) and the desired API version, which is a
-monotonically increasing semantic version number starting from ``1.0``.
-
-If a user makes a request without specifying a version, they will get the
-``DEFAULT_API_VERSION`` as defined in ``senlin.api.common.wsgi``. This value
-is currently ``1.0`` and is expected to remain so for quite a long time.
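-
-For example, a client can pin a specific microversion by setting this header
-on each request. A minimal sketch (the endpoint URL and token are
-placeholders, and the version shown is only an example):
-
-.. code-block:: python
-
-    import requests
-
-    headers = {
-        'X-Auth-Token': 'TOKEN',                    # placeholder token
-        'OpenStack-API-Version': 'clustering 1.8',  # example microversion
-    }
-    resp = requests.get('http://senlin.example.com/v1/clusters',
-                        headers=headers)
-    print(resp.status_code, resp.headers.get('OpenStack-API-Version'))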
- -There is a special value "``latest``" which can be specified, which will allow -a client to always invoke the most recent version of APIs from the server. - -.. warning:: The ``latest`` value is mostly meant for integration testing and - would be dangerous to rely on in client code since Senlin microversions are - not following semver and therefore backward compatibility is not guaranteed. - Clients, like python-senlinclient or openstacksdk, python-openstackclient - should always require a specific microversion but limit what is acceptable - to the version range that it understands at the time. - - -When to Bump the Microversion -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A microversion is needed when the contract to the user is changed. The user -contract covers many kinds of information such as: - -- the Request - - - the list of resource URLs which exist on the server - - Example: adding a new ``GET clusters/{ID}/foo`` resource which didn't exist - in a previous version of the code - - - the list of query parameters that are valid on URLs - - Example: adding a new parameter ``is_healthy`` when querying a node by - ``GET nodes/{ID}?is_healthy=True`` - - - the list of query parameter values for non-freeform fields - - Example: parameter ``filters`` takes a small set of properties "``A``", - "``B``", "``C``", now support for new property "``D``" is added - - - new headers accepted on a request - - - the list of attributes and data structures accepted. - - Example: adding a new attribute ``'locked': True/False`` to a request body - -- the Response - - - the list of attributes and data structures returned - - Example: adding a new attribute ``'locked': True/False`` to the output - of ``GET clusters/{ID}`` - - - the allowed values of non-freeform fields - - Example: adding a new allowed "``status``" field to ``GET servers/{ID}`` - - - the list of status codes allowed for a particular request - - Example: an API previously could return 200, 400, 403, 404 and the - change would make the API now also be allowed to return 409. - - - changing a status code on a particular response - - Example: changing the return code of an API from 501 to 400. - - .. note:: According to the OpenStack API Working Group, a - **500 Internal Server Error** should **NOT** be returned to the user for - failures due to user error that can be fixed by changing the request on - the client side. This kind of a fix doesn't require a change to the - microversion. - - - new headers returned on a response - -The following flow chart attempts to walk through the process of "do -we need a microversion". - - -.. graphviz:: - - digraph states { - - label="Do I need a microversion?" 
-
-   silent_fail[shape="diamond", style="", group=g1, label="Did we silently
-      fail to do what is asked?"];
-   ret_500[shape="diamond", style="", group=g1, label="Did we return a 500
-      before?"];
-   new_error[shape="diamond", style="", group=g1, label="Are we changing the
-      status code returned?"];
-   new_attr[shape="diamond", style="", group=g1, label="Did we add or remove
-      an attribute to a resource?"];
-   new_param[shape="diamond", style="", group=g1, label="Did we add or remove
-      an accepted query string parameter or value?"];
-   new_resource[shape="diamond", style="", group=g1, label="Did we add or
-      remove a resource url?"];
-
-
-   no[shape="box", style=rounded, label="No microversion needed"];
-   yes[shape="box", style=rounded, label="Yes, you need a microversion"];
-   no2[shape="box", style=rounded, label="No microversion needed, it's a bug"];
-
-   silent_fail -> ret_500[label=" no"];
-   silent_fail -> no2[label="yes"];
-
-   ret_500 -> no2[label="yes [1]"];
-   ret_500 -> new_error[label=" no"];
-
-   new_error -> new_attr[label=" no"];
-   new_error -> yes[label="yes"];
-
-   new_attr -> new_param[label=" no"];
-   new_attr -> yes[label="yes"];
-
-   new_param -> new_resource[label=" no"];
-   new_param -> yes[label="yes"];
-
-   new_resource -> no[label=" no"];
-   new_resource -> yes[label="yes"];
-
-   {rank=same; yes new_attr}
-   {rank=same; no2 ret_500}
-   {rank=min; silent_fail}
-  }
-
-
-.. NOTE:: The reason behind such a strict contract is that we want application
-   developers to be sure what the contract is at every microversion in Senlin.
-
-   When in doubt, consider application authors. If it would work with no
-   client side changes on both Senlin versions, you probably don't need a
-   microversion. If, however, there is any ambiguity, a microversion is
-   likely needed.
-
-
-When a Microversion Is Not Needed
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A microversion is not needed in the following situations:
-
-- the response
-
-  - Changing the error message without changing the response code does not
-    require a new microversion.
-
-  - Removing an inapplicable HTTP header, for example, suppose the Retry-After
-    HTTP header is being returned with a 4xx code. This header should only be
-    returned with a 503 or 3xx response, so it may be removed without bumping
-    the microversion.
-
-
-Working with Microversions
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In the ``senlin.api.common.wsgi`` module, we define an ``@api_version``
-decorator which is intended to be used on top-level methods of controllers.
-It is not appropriate for lower-level methods.
-
-
-Adding a New API Method
------------------------
-
-In the controller class:
-
-.. code-block:: python
-
-    @wsgi.Controller.api_version("2.4")
-    def my_api_method(self, req, id):
-        ....
-
-This method is only available if the caller had specified a request header
-``OpenStack-API-Version`` with value ``clustering <ver>`` and ``<ver>`` is >=
-``2.4``. If they had specified a lower version (or omitted it thus got the
-default of ``1.0``) the server would respond with HTTP 404.
-
-
-Removing an API Method
-----------------------
-
-In the controller class:
-
-.. code-block:: python
-
-    @wsgi.Controller.api_version("2.1", "2.4")
-    def my_api_method(self, req, id):
-        ....
-
-This method would only be available if the caller had specified an
-``OpenStack-API-Version`` with value ``clustering <ver>`` and the ``<ver>`` is
-<= ``2.4``. If ``2.5`` or later is specified the server will respond with
-HTTP 404.
-
-
-Changing a Method's Behavior
-----------------------------
-
-In the controller class:
-
-.. code-block:: python
-
-    @wsgi.Controller.api_version("1.0", "2.3")
-    def my_api_method(self, req, id):
-        .... method_1 ...
-
-    @wsgi.Controller.api_version("2.4")  # noqa
-    def my_api_method(self, req, id):
-        .... method_2 ...
-
-If a caller specified ``2.1``, ``2.2`` or ``2.3`` (or received the default of
-``1.0``) they would see the result from ``method_1``; with ``2.4`` or later,
-the result from ``method_2``.
-
-It is vital that the two methods have the same name, so the second one will
-need ``# noqa`` to avoid failing flake8's ``F811`` rule. The two methods may
-be different in any kind of semantics (schema validation, return values,
-response codes, etc.)
-
-
-When Not Using Decorators
--------------------------
-
-When you don't want to use the ``@api_version`` decorator on a method or you
-want to change behavior within a method (say it leads to simpler or simply a
-lot less code) you can directly test for the requested version with a method
-as long as you have access to the API request object. Every API method has a
-``version_request`` object attached to the ``Request`` object and that can be
-used to modify behavior based on its value:
-
-.. code-block:: python
-
-    import senlin.api.common.version_request as vr
-
-    def index(self, req):
-        # common code ...
-
-        req_version = req.version_request
-        req1_min = vr.APIVersionRequest("2.1")
-        req1_max = vr.APIVersionRequest("2.5")
-        req2_min = vr.APIVersionRequest("2.6")
-        req2_max = vr.APIVersionRequest("2.10")
-
-        if req_version.matches(req1_min, req1_max):
-            # stuff...
-        elif req_version.matches(req2_min, req2_max):
-            # other stuff...
-        elif req_version > vr.APIVersionRequest("2.10"):
-            # more stuff...
-
-        # common code ...
-
-The first argument to the matches method is the minimum acceptable version
-and the second is the maximum acceptable version. A specified version can be
-null:
-
-.. code-block:: python
-
-    null_version = APIVersionRequest()
-
-If the minimum version specified is null then there is no restriction on
-the minimum version, and likewise if the maximum version is null there
-is no restriction on the maximum version. Alternatively, a one-sided
-comparison can be used as in the example above.
-
-
-Planning and Committing Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Once the idea of an API change is discussed with the core team and the
-consensus has been reached to bump the micro-version of Senlin API, you can
-start working on the changes in the following order:
-
-1. Prepare the engine and possibly the action layer for the change. One STRICT
-   requirement is that the newly proposed change(s) should not break any
-   existing users.
-
-2. Add a new versioned object if a new API is introduced; or modify the fields
-   of an existing object representing the API request. You are expected to
-   override the ``obj_make_compatible()`` method to ensure the request formed
-   will work on an older version of the engine.
-
-3. If the change is about modifying an existing API, you will need to bump the
-   version of the request object. You are also required to add or change the
-   ``VERSION_MAP`` dictionary of the request object class where the key is the
-   API microversion and the value is the object version. For example:
-
-.. code-block:: python
-
-    @base.SenlinObjectRegistry.register
-    class ClusterDanceRequest(base.SenlinObject):
-
-        # VERSION 1.0: Initial version
-        # VERSION 1.1: Add field 'style'
-        VERSION = '1.1'
-        VERSION_MAP = {
-            'x.y': '1.1'
-        }
-
-        fields = {
-            ...
- 'style': fields.StringField(nullable=True), - } - - def obj_make_compatible(self, primitive, target_version): - # add the logic to convert the request for a target version - ... - - -4. Patch the API layer to introduce the change. This involves changing the - ``senlin/api/openstack/history.rst`` file to include the descriptive - information about the changes made. - -5. Revise the API reference documentation so that the changes are properly - documented. - -6. Add a release note entry for the API change. - -7. Add tempest based API test and functional tests. - -8. Update ``_MAX_API_VERSION`` in ``senlin.api.openstack.versions``, if needed. - Note that each time we bump the API microversion, we may introduce two or - more changes rather than one single change, the update of - ``_MAX_API_VERSION`` needs to be done only once if this is the case. - -9. Commit patches to the ``openstacksdk`` project so that new API - changes are accessible from client side. - -10. Wait for the new release of ``openstacksdk`` project that includes - the new changes and then propose changes to ``python-senlinclient`` - project. - - -Allocating a microversion -~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you are adding a patch which adds a new microversion, it is necessary to -allocate the next microversion number. Except under extremely unusual -circumstances, the minor number of ``_MAX_API_VERSION`` will be incremented. -This will also be the new microversion number for the API change. - -It is possible that multiple microversion patches would be proposed in -parallel and the microversions would conflict between patches. This will -cause a merge conflict. We don't reserve a microversion for each patch in -advance as we don't know the final merge order. Developers may need over time -to rebase their patch calculating a new version number as above based on the -updated value of ``_MAX_API_VERSION``. - - -.. include:: ../../../senlin/api/openstack/history.rst diff --git a/doc/source/contributor/authorization.rst b/doc/source/contributor/authorization.rst deleted file mode 100644 index 3b1d93043..000000000 --- a/doc/source/contributor/authorization.rst +++ /dev/null @@ -1,191 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -==================== -Senlin Authorization -==================== - -As a service to be consumed by end users and possibly other IT persons, Senlin -has some basic components and strategies to manage access control. The design -is meant to be as open as possible though the current focus as this document is -drafted is on enabling Keystone-based (aka. token-based) OpenStack -authorization. - -This document presents an overview of the authentication and authorization -mechanisms provided by the Senlin API and its service engine. The top-most -design consideration of these mechanisms is to make it accommodating so that -the interactions with different authentication engines can be done using the -same framework. 
The reason behind this decision is to make Senlin cloud-backend agnostic so
that it can be used to support clustering of resources in multi-cloud,
multi-region, or multi-availability-zone setups.


Major Components
~~~~~~~~~~~~~~~~

In the context of an OpenStack cloud, the most important components involved
in the authentication and the authorization process are:

- The Senlin client (i.e. the `python-senlinclient` package), which accepts
  user credentials provided through environment variables and/or command line
  arguments and forwards them to the OpenStack SDK (i.e. the `openstacksdk`
  package) when making service requests to the Senlin API.
- The OpenStack SDK (`openstacksdk`), which is used by the Senlin engine to
  interact with any other OpenStack services. The Senlin client also uses the
  SDK to talk to the Senlin API. The SDK package translates the user-provided
  credentials into a token by invoking the Keystone service.
- The Keystone middleware (i.e. `keystonemiddleware`), which backs the
  `auth_token` WSGI middleware in the Senlin API pipeline and provides a basic
  validation filter. The filter is responsible for validating the token that
  exists in the HTTP request header and then populating the HTTP request
  header with detailed information for the downstream filters (including the
  API itself) to use.
- The `context` WSGI middleware, which is based on the `oslo.context` package
  and provides a constructor for the `RequestContext` data structure that
  accompanies any request down the WSGI application pipeline, so that those
  downstream components don't have to access the HTTP request header.


Usage Scenarios
~~~~~~~~~~~~~~~

There are several ways to raise a service request to the Senlin API, each of
which has its own characteristics that will affect the way authentication
and/or authorization is performed.

1) Users interact with the Senlin service API using the OpenStack client (i.e.
   the plugin provided by the `python-senlinclient` package). The requests,
   after being preprocessed by the OpenStack SDK, will contain a valid
   Keystone token that can be validated by the `auth_token` WSGI middleware.
2) Users interact with the Senlin service API directly by making HTTP requests
   where the requester's credentials have been validated by Keystone, so the
   requests will carry a valid Keystone token for verification by the
   `auth_token` middleware as well.
3) Users interact with the Senlin service API directly by making HTTP
   requests, but the requests are "naked" ones, which means that the requests
   do not contain credentials as expected by the Senlin API (or other
   OpenStack services). Instead, the URI requested contains some special
   parameters for authentication and/or authorization purposes.

Scenarios 1) and 2) are the most common ways for users to use the Senlin API.
They share the same request format when the request arrives at the Senlin API
endpoint. Scenario 3) is a little bit different. What Senlin wants to achieve
is to make no assumption about where the service requests come from. That
means it cannot assume that the requester (which could be any program) will
fill in the required headers in its service requests. One example of such use
cases is the Webhook API Senlin provides, which enables a user to trigger an
action on an object managed by Senlin. Senlin provides special support for
these use cases.
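As an illustration of scenario 3), a "naked" webhook triggering request might
look like the following sketch, where the endpoint host, port and webhook ID
are all hypothetical; note that no ``X-Auth-Token`` header is carried, and the
special parameters travel in the URI:

::

    $ curl -X POST \
        'http://senlin.example.com:8778/v1/webhooks/<WEBHOOK_ID>/trigger?V=1'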
Operation Delegation (Trusts)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Since Senlin models most operations as "Actions" that can be executed by
worker threads asynchronously, these operations have to be done on behalf of
the requester so that they can be properly traced, authenticated, audited or
logged.


Credentials and Context
-----------------------

A generic solution to the delegation problem is to ask users to provide their
credentials to Senlin so that Senlin can impersonate them when interacting
with other services. In fact, this may be the only solution that can be
applied on different cloud backends.

Senlin supports a `context` property for all "profile" types by default,
unless overridden by a profile type implementation. This context can be
treated as a container for these credentials. Storing user credentials in the
Senlin database does imply a security risk. In the future, we hope Senlin can
make use of the Barbican service for this purpose.

Senlin's implementation of context is based on the `oslo_context` package.
There is still room for improvement thanks to the new enhancements to that
package.


Trusts: Dealing with Token Expiration
-------------------------------------

In some cases, the solution above may be impractical because, after the
client-side processing and/or the front-end middleware filtering, Senlin
cannot get the original user credentials (e.g. user name and password).
Senlin can only get a "token", which expires in an hour by default. This means
that after no more than one hour, Senlin won't be able to use this token for
authentication/authorization.

The OpenStack identity service (a.k.a Keystone) has considered this situation
and provided a solution. When a requester wants to delegate his/her roles in a
project to a 3rd party, he or she can create a "Trust" relationship between
him/her (the trustor) and that 3rd party (the trustee). The "Trust" has a
unique ID that can be used by the trustee when authenticating with Keystone.
Once the trust ID is authenticated, the trustee can perform operations on
behalf of the trustor.

The trust extension in Keystone V3 can be used to solve the token expiration
problem. There are two ways to do this, as shown below.

1) Requester Created Trusts: Before creating a profile, a requester can create
   a trust with the trustee set to the `senlin` user. He or she can customize
   the roles that can be assumed by `senlin`, which can be a subset of the
   roles the requester currently has in that project. When the requester later
   creates a profile, he or she can provide the `trust_id` as a key in the
   `context` property. Senlin can then use this trust for authentication and
   authorization purposes.
2) Senlin Created Trusts: The solution above adds some burden for end users.
   In order to make the Senlin service easy to use, Senlin will do the trust
   creation in the background. Whenever a new request comes in, Senlin will
   check if there is an existing trust relationship between the requester and
   the `senlin` user. If no such trust exists, Senlin will "hijack" the user's
   token and create a trust with `senlin` as the trustee. This trust
   relationship is currently stored in the Senlin database, and the management
   of this sensitive information can be delegated to Barbican as well in the
   future.


Precedence Consideration
------------------------

Since there is now more than one place from which Senlin can get the
credentials to use, Senlin needs to impose a precedence among the credential
sources.
When Senlin tries to contact a cloud service via a driver, the requests are
issued from a subclass of `Profile`. Senlin will check the `user` property of
the targeted cluster or node and retrieve the trust record from the database
using the `user` as the key. By default, Senlin will try to obtain a new token
from Keystone using the `senlin` user's credentials (configured in the
`senlin.conf` file) and the `trust_id`. Before doing that, Senlin will check
if the profile used has a "customized" `context`. If there are credentials
such as `password` or `trust_id` in the context, Senlin deletes its current
`trust_id` from the context, and adds the credentials found in the profile
into the context.

In this way, a user can specify the credentials Senlin should use when talking
to other cloud services by customizing the `context` property of a profile.
The specified credentials may or may not belong to the requester.


Trust Middleware
----------------

When a service request arrives at the Senlin API, Senlin checks if there is a
trust relationship built between the requesting user and the `senlin` user. A
new trust is created if no such record is found.

Once a trust is found or created, the `trust_id` is saved into the current
`context` data structure. Down the invocation path, or during asynchronous
action executions, the `trust_id` will be used for token generation when
needed.

Senlin provides an internal database table to store the trust information. It
may be removed in the future when there are better ways to handle this
sensitive information.
diff --git a/doc/source/contributor/cluster.rst b/doc/source/contributor/cluster.rst
deleted file mode 100644
index 6cae77fc9..000000000
--- a/doc/source/contributor/cluster.rst
+++ /dev/null
@@ -1,624 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.

========
Clusters
========

Clusters are first-class citizens in the Senlin service design. A cluster is
defined as a collection of homogeneous objects, where "homogeneous" means
that the objects managed (aka nodes) have to be instantiated from the same
"profile type".

A cluster can contain zero or more nodes. Senlin provides REST APIs for users
to create, retrieve, update, and delete clusters. Using these APIs, a user can
manage the node membership of a cluster.

A cluster is owned by a user (the owner), and it is accessible from within the
Keystone project (tenant) which is the default project of the user.

A cluster has the following timestamps when instantiated:

- ``init_at``: the timestamp when the cluster object is initialized in the
  Senlin database, but the actual cluster creation has not yet started;
- ``created_at``: the timestamp when the cluster object is created, i.e. when
  the ``CLUSTER_CREATE`` action has completed;
- ``updated_at``: the timestamp when the cluster was last updated.
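For illustration, a cluster that was created and later updated once might
carry timestamp values like the following (all values hypothetical):

::

    {
        "init_at": "2023-01-10T08:15:02Z",
        "created_at": "2023-01-10T08:15:40Z",
        "updated_at": "2023-01-12T14:30:45Z"
    }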
- - -Cluster Statuses -~~~~~~~~~~~~~~~~ - -A cluster can have one of the following statuses during its lifecycle: - -- ``INIT``: the cluster object has been initialized, but not created yet; -- ``ACTIVE``: the cluster is created and providing service; -- ``CREATING``: the cluster creation action is still on going; -- ``ERROR``: the cluster is still providing services, but there are things - going wrong that needs human intervention; -- ``CRITICAL``: the cluster is not operational, it may or may not be - providing services as expected. Senlin cannot recover it from its current - status. The best way to deal with this cluster is to delete it and then - re-create it if needed. -- ``DELETING``: the cluster deletion is ongoing; -- ``WARNING``: the cluster is operational, but there are some warnings - detected during past operations. In this case, human involvement is - suggested but not required. -- ``UPDATING``: the cluster is being updated. - -Along with the ``status`` property, Senlin provides a ``status_reason`` -property for users to check what is the cause of the cluster's current status. - -To avoid frequent databases accesses, a cluster object has a runtime data -property named ``rt`` which is a Python dictionary. The property caches the -profile referenced by the cluster, the list of nodes in the cluster and the -policies attached to the cluster. The runtime data is not directly visible to -users. It is merely a convenience for cluster operations. - - -Creating A Cluster -~~~~~~~~~~~~~~~~~~ - -When creating a cluster, the Senlin API will verify whether the request -carries a body with valid, sufficient information for the engine to complete -the creation job. The following fields are required in a map named ``cluster`` -in the request JSON body: - -- ``name``: the name of the cluster to be created; -- ``profile``: the name or ID or short-ID of a profile to be used; -- ``desired_capacity``: the desired number of nodes in the cluster, which is - treated also as the initial number of nodes to be created. - -The following optional fields can be provided in the ``cluster`` map in the -JSON request body: - -- ``min_size``: the minimum number of nodes inside the cluster, default - value is 0; -- ``max_size``: the maximum number of nodes inside the cluster, default - value is -1, which means there is no upper limit on the number of nodes; -- ``timeout``: the maximum number of seconds to wait for the cluster to - become ready, i.e. ``ACTIVE``. -- ``metadata``: a list of key-value pairs to be associated with the cluster. -- ``dependents``: A dict contains dependency information between nova server/ - heat stack cluster and container cluster. The container node's id will be - stored in 'dependents' property of its host cluster. - -The ``max_size`` and the ``min_size`` fields, when specified, will be checked -against each other by the Senlin API. The API also checks if the specified -``desired_capacity`` falls out of the range [``min_size``, ``max_size``]. If -any verification failed, a ``HTTPBadRequest`` exception is thrown and the -cluster creation request is rejected. - -A cluster creation request is then forwarded to the Senlin RPC engine for -processing, where the engine creates an Action for the request and queues it -for any worker threads to execute. Once the action is queued, the RPC engine -returns the current cluster properties in a map to the API. Along with these -properties, the engine also returns the UUID of the Action that will do the -real job of cluster creation. 
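To illustrate the fields described above, a minimal cluster creation request
body might look like the following; the profile name and all other values are
hypothetical:

::

    {
        "cluster": {
            "name": "web_servers",
            "profile": "nova_server_profile",
            "desired_capacity": 3,
            "min_size": 1,
            "max_size": 5,
            "timeout": 3600,
            "metadata": {"owner": "web-team"}
        }
    }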
After submitting the request, a user can check the status of the returned
action to determine whether the cluster creation has completed successfully or
failed.


Listing Clusters
~~~~~~~~~~~~~~~~

Clusters in the current project can be queried using some query parameters.
None of these parameters is required. By default, the Senlin API will return
all clusters that are not deleted.

When listing clusters, the following query parameters can be specified,
individually or combined:

- ``filters``: a map containing key-value pairs for matching. Records that
  fail to match the criteria will be filtered out. The valid keys in this map
  include:

  * ``name``: name of clusters to list, can be a string or a list of strings;
  * ``status``: status of clusters, can be a string or a list of strings;

- ``limit``: a number that restricts the maximum number of records to be
  returned from the query. It is useful for displaying the records in pages
  where the page size can be specified as the limit.
- ``marker``: a string that represents the last seen UUID of clusters in
  previous queries. This query will only return results appearing after the
  specified UUID. This is useful for displaying records in pages.
- ``sort``: a string to enforce sorting of the results. It accepts a list of
  known property names of a cluster as sorting keys separated by commas. Each
  sorting key can optionally have either ``:asc`` or ``:desc`` appended to the
  key for controlling the sorting direction.
- ``global_project``: a boolean indicating whether cluster listing should be
  done in a tenant-safe way. When this value is specified as False (the
  default), only clusters from the current project that match the other
  criteria will be returned. When this value is specified as True, clusters
  matching all other criteria will be returned, regardless of the project in
  which they were created. Only a user with admin privilege is permitted to do
  a global listing.


Getting a Cluster
~~~~~~~~~~~~~~~~~

When a user wants to check the details of a specific cluster, he or she can
specify one of the following keys for the query:

- cluster UUID: clusters are queried strictly based on the UUID given. This is
  the most precise query supported.
- cluster name: Senlin allows multiple clusters to have the same name. It is
  the user's responsibility to avoid name conflicts if needed. The output is
  the details of a cluster if the cluster name is unique; otherwise, Senlin
  will return a message telling users that multiple clusters were found
  matching the specified name.
- short ID: considering that a UUID is a long string that is not convenient to
  input, Senlin supports a short version of UUIDs for queries. The Senlin
  engine will use the provided string as a prefix to attempt a match in the
  database. When the "ID" is long enough to be unique, the details of the
  matching cluster are returned; otherwise, Senlin will return an error
  message indicating that more than one cluster matching the short ID has been
  found.

The Senlin engine will try the above three ways in order to find a match in
the database.

In the returned result, Senlin injects a list of node IDs for the nodes in the
cluster. It also injects the name of the profile used by the cluster. These
are all for the user's convenience.


Updating A Cluster
~~~~~~~~~~~~~~~~~~

A cluster can be updated upon a user's request. In theory, all properties of a
cluster could be updated/changed. However, some update operations are
light-weight, while others are heavy-weight.
This is because the semantics of the properties differ a lot from each other.
Currently, cluster profile related changes and cluster size related changes
are heavy-weight because they may induce a chain of operations on the cluster.
Updates to other properties are light-weight operations.

In the JSON body of a ``cluster_update`` request, users can specify new values
for the following properties:

- ``name``: new cluster name;
- ``profile_id``: ID, name or short ID of a profile object to use;
- ``metadata``: a list of key-value pairs to be associated with the cluster;
  this dict will be merged with the existing key-value pairs based on keys;
- ``desired_capacity``: new *desired* size for the cluster;
- ``min_size``: new lower bound for the cluster size;
- ``max_size``: new upper bound for the cluster size;
- ``timeout``: new timeout value for the specified cluster;
- ``profile_only``: a boolean value indicating whether only the cluster's
  profile should be updated, leaving existing nodes unchanged.


Update Cluster's Profile
------------------------

When ``profile_id`` is specified, the request will be interpreted as a
holistic update of all nodes across the cluster. The targeted use case is a
cluster-wide system upgrade, for example, replacing the glance images used by
the cluster nodes when new kernel patches have been applied or software
defects have been fixed.

When receiving such an update request, the Senlin engine will check if the new
profile referenced does exist and whether the new profile has the same profile
type as that of the existing profile. Exceptions will be thrown if any
verification fails, and the request is thus rejected.

After the engine has validated the request, an Action of ``CLUSTER_UPDATE`` is
created and queued internally for execution. Later on, when a worker thread
picks up the action for execution, it will first lock the whole cluster and
mark the cluster status as ``UPDATING``. It will then fork ``NODE_UPDATE``
actions per node inside the cluster, which are in turn queued for execution.
Other worker threads will pick up the node-level update actions for execution
and mark each action as completed/failed. When all these node-level updates
are completed, the ``CLUSTER_UPDATE`` operation continues and marks the
cluster as ``ACTIVE`` again.

Senlin also provides a parameter ``profile_only`` for this action, so that any
newly created nodes will use the new profile, but existing nodes will not be
changed.

The cluster update operation may take a long time to complete, depending on
the response time from the underlying profile operations. Note also that when
an update policy is attached to the cluster and enabled, the update operation
may be split into several batches so that 1) a minimum number of nodes remain
in service at any time; and 2) the pressure on the underlying service is
controlled.


Update Cluster Size Properties
------------------------------

When any one of the ``desired_capacity``, ``min_size`` and ``max_size``
properties is specified in a ``CLUSTER_UPDATE`` request, it may lead to a
resize operation on the cluster.

The Senlin API will do a preliminary validation of the new property values.
For example, if both ``min_size`` and ``max_size`` are specified, they have to
be integers and the value of ``max_size`` has to be greater than the value of
``min_size``, unless ``max_size`` is -1, which means the upper bound of the
cluster size is unlimited.
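The following is a minimal sketch of the kind of bounds checking described
above; the helper name and the exception type are illustrative, not the actual
Senlin code:

.. code-block:: python

    def validate_size_bounds(min_size=None, max_size=None):
        """Sanity-check requested cluster size bounds.

        A max_size of -1 means the cluster size has no upper bound.
        """
        if min_size is not None and int(min_size) < 0:
            raise ValueError("min_size cannot be negative")
        if min_size is not None and max_size is not None:
            if int(max_size) != -1 and int(max_size) < int(min_size):
                raise ValueError("max_size must be greater than or equal "
                                 "to min_size, unless max_size is -1 "
                                 "(unlimited)")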
When the request is received by the Senlin engine, the engine first retrieves
the cluster properties from the database and does further cross-verification
between the new property values and the current values. For example, it is
treated as an invalid request if a user has specified a value for ``min_size``
but no value for ``max_size``, and the new ``min_size`` is greater than the
existing ``max_size`` of the cluster. In this case, the user has to provide a
valid ``max_size`` to override the existing value, or he/she has to lower the
``min_size`` value so that the request becomes acceptable.

Once the cross-verification has passed, the Senlin engine will calculate the
new ``desired_capacity`` and adjust the size of the cluster if deemed
necessary. For example, when the cluster size is below the new ``min_size``,
new nodes will be created and added to the cluster; when the cluster size is
above the new ``max_size``, some nodes will be removed from the cluster. If
the ``desired_capacity`` is set and the property value falls within the new
range of the cluster size, Senlin tries to resize the cluster to the
``desired_capacity``.

When the size of the cluster is adjusted, the Senlin engine will check if
there are relevant policies attached to the cluster, so that the engine will
add and/or remove nodes in a predictable way.


Update Other Cluster Properties
-------------------------------

Updates to other cluster properties are relatively straightforward. The Senlin
engine simply verifies the data types when necessary and overrides the
existing property values in the database.

Note that in the cases where multiple properties are specified in a single
``CLUSTER_UPDATE`` request, some will take a longer time to complete than
others. Any mix of update properties is acceptable to the Senlin API and the
engine.


Cluster Actions
~~~~~~~~~~~~~~~

A cluster object supports the following asynchronous actions:

- ``add_nodes``: add a list of nodes into the target cluster;
- ``del_nodes``: remove the specified list of nodes from the cluster;
- ``replace_nodes``: replace the specified list of nodes in the cluster;
- ``resize``: adjust the size of the cluster;
- ``scale_in``: explicitly shrink the size of the cluster;
- ``scale_out``: explicitly enlarge the size of the cluster;
- ``policy_attach``: attach a policy object to the cluster;
- ``policy_detach``: detach a policy object from the cluster;
- ``policy_update``: modify the settings of a policy that is attached to the
  cluster.

The ``scale_in`` and the ``scale_out`` actions are subject to change in the
future. We recommend using the unified ``CLUSTER_RESIZE`` action for cluster
size adjustments.

A user, or a piece of software, can call the ``cluster_action`` API to issue
an action for Senlin to perform. Senlin will verify that the top-level key in
the JSON body of such a request contains *one* of the above actions. When no
valid action name is found, or more than one action is specified, the API will
return an error message to the caller and reject the request.


Adding Nodes to a Cluster
-------------------------

The Senlin API provides the ``add_nodes`` action for users to add existing
nodes into the specified cluster. The parameter for this action is interpreted
as a list in which each item is the UUID, name or short ID of a node.

When receiving an ``add_nodes`` action request, the Senlin API only validates
that the parameter is a list and whether that list is empty.
After this validation, the request is forwarded to the Senlin engine for
processing.

The Senlin engine will examine the nodes in the list one by one and reject the
request if any of the following conditions is true:

- a node from the list is not in ``ACTIVE`` state;
- a node from the list is still a member of another cluster;
- a node from the list is not found in the database;
- the number of nodes to add is zero.

When this phase of validation succeeds, the request is translated into a
``CLUSTER_ADD_NODES`` built-in action and queued for execution. The engine
returns an action UUID to the user for status checking.

When the action is picked up by a worker thread for execution, Senlin checks
if the profile type of the nodes to be added matches that of the cluster.
Finally, a number of ``NODE_JOIN`` actions are forked from the
``CLUSTER_ADD_NODES`` action and executed. When all ``NODE_JOIN`` actions
complete, the ``CLUSTER_ADD_NODES`` action returns with success.

In the cases where there are load-balancing policies attached to the cluster,
the ``CLUSTER_ADD_NODES`` action will save the list of UUIDs of the new nodes
into the action's ``data`` field so that those policies can update the
associated resources.


Deleting Nodes from a Cluster
-----------------------------

The Senlin API provides the ``del_nodes`` action for users to delete existing
nodes from the specified cluster. The parameter for this action is interpreted
as a list in which each item is the UUID, name or short ID of a node.

When receiving a ``del_nodes`` action request, the Senlin API only validates
that the parameter is a list and whether that list is empty. After this
validation, the request is forwarded to the Senlin engine for processing.

The Senlin engine will examine the nodes in the list one by one and reject the
request if any of the following conditions is true:

- a node from the list cannot be found in the database;
- a node from the list is not a member of the specified cluster;
- the number of nodes to delete is zero.

When this phase of validation succeeds, the request is translated into a
``CLUSTER_DEL_NODES`` built-in action and queued for execution. The engine
returns an action UUID to the user for status checking.

When the action is picked up by a worker thread for execution, Senlin forks a
number of ``NODE_DELETE`` actions and executes them asynchronously. When all
forked actions complete, the ``CLUSTER_DEL_NODES`` action returns with
success.

In the cases where there are load-balancing policies attached to the cluster,
the ``CLUSTER_DEL_NODES`` action will save the list of UUIDs of the deleted
nodes into the action's ``data`` field so that those policies can update the
associated resources.

If a deletion policy with a ``hooks`` property is attached to the cluster, the
``CLUSTER_DEL_NODES`` action will create the ``NODE_DELETE`` actions in
``WAITING_LIFECYCLE_COMPLETION`` status, which means they are not executed
immediately. It also sends the lifecycle hook message to the target specified
in the deletion policy. If the complete-lifecycle API is called for a
``NODE_DELETE`` action, that action will be executed. If not all of the
``NODE_DELETE`` actions have been executed before the hook timeout specified
in the deletion policy is reached, the remaining ``NODE_DELETE`` actions are
moved into ``READY`` status and scheduled for execution. When all actions
complete, the ``CLUSTER_DEL_NODES`` action returns with success.
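For reference, a deletion policy that enables such lifecycle hooks might look
like the following sketch; the property names mirror the behavior described
above, but the schema version and the values (queue name, timeout) are
illustrative assumptions rather than a verified specification:

::

    type: senlin.policy.deletion
    version: 1.1
    properties:
      criteria: OLDEST_FIRST
      destroy_after_deletion: true
      hooks:
        type: zaqar
        timeout: 120
        params:
          queue: cluster_lifecycle_hooks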
Note also that, by default, Senlin won't destroy the nodes that are deleted
from the cluster. It simply removes the nodes from the cluster so that they
become orphan nodes. Senlin also provides a parameter
``destroy_after_deletion`` for this action so that a user can request the
deleted node(s) to be destroyed right away, instead of becoming orphan nodes.


Replacing Nodes in a Cluster
----------------------------

The Senlin API provides the ``replace_nodes`` action for users to replace
existing nodes in the specified cluster. The parameter for this action is
interpreted as a dict in which each item is a node pair
``{OLD_NODE: NEW_NODE}``. The key ``OLD_NODE`` is the UUID, name or short ID
of the node to be replaced, and the value ``NEW_NODE`` is the UUID, name or
short ID of the node serving as its replacement.

When receiving a ``replace_nodes`` action request, the Senlin API only
validates that the parameter is a dict and whether that dict is empty. After
this validation, the request is forwarded to the Senlin engine for processing.

The Senlin engine will examine the nodes in the dict one by one and accept the
request only if all of the following conditions are true:

- all nodes from the dict can be found in the database;
- all nodes to be replaced are members of the specified cluster;
- all replacement nodes are not members of any cluster;
- the profile types of all replacement nodes match that of the specified
  cluster;
- the statuses of all replacement nodes are ``ACTIVE``.

When this phase of validation succeeds, the request is translated into a
``CLUSTER_REPLACE_NODES`` built-in action and queued for execution. The engine
returns an action UUID to the user for status checking.

When the action is picked up by a worker thread for execution, Senlin forks a
number of ``NODE_LEAVE`` and related ``NODE_JOIN`` actions and executes them
asynchronously. When all forked actions complete, the
``CLUSTER_REPLACE_NODES`` action returns with success.


Resizing a Cluster
------------------

In addition to the ``cluster_update`` request, Senlin provides a dedicated API
for adjusting the size of a cluster, i.e. ``cluster_resize``. This operation
is designed for both auto-scaling and manual-scaling use cases.

Below is a list of parameters recognized by the Senlin API when parsing the
JSON body of a ``cluster_resize`` request:

- ``adjustment_type``: the type of adjustment to be performed, where the value
  should be one of the following:

  * ``EXACT_CAPACITY``: the adjustment specifies the targeted size of the
    cluster;
  * ``CHANGE_IN_CAPACITY``: the adjustment specifies the number of nodes to be
    added to or removed from the cluster; this is the default setting;
  * ``CHANGE_IN_PERCENTAGE``: the adjustment is a relative percentage of the
    targeted cluster's size.

  This field is mandatory.
- ``number``: the adjustment number, whose value will be interpreted based on
  the value of ``adjustment_type``. This field is mandatory.
- ``min_size``: the new lower bound for the cluster size;
- ``max_size``: the new upper bound for the cluster size;
- ``min_step``: the minimum number of nodes to be added or removed when the
  ``adjustment_type`` is set to ``CHANGE_IN_PERCENTAGE`` and the absolute
  value computed is less than 1;
- ``strict``: a boolean value indicating whether the service should do a
  best-effort resizing operation even if the request cannot be fully met.
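For example, the following request (with a hypothetical value) pins the
cluster size to exactly five nodes:

::

    {
        "adjustment_type": "EXACT_CAPACITY",
        "number": 5
    }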
As another example, the following request increases the size of the cluster
by 20%, where Senlin can try a best effort if the calculated size is greater
than the upper limit of the cluster size:

::

    {
        "adjustment_type": "CHANGE_IN_PERCENTAGE",
        "number": 20,
        "strict": false
    }

When the Senlin API receives a ``cluster_resize`` request, it first validates
the data types of the values and the sanity of the value collection. For
example, you cannot specify a ``min_size`` greater than the current upper
bound (i.e. the ``max_size`` property of the cluster) if you are not providing
a new ``max_size`` that is greater than the ``min_size``.

After the request is forwarded to the Senlin engine, the engine will further
validate the parameter values against the targeted cluster. When all
validations pass, the request is converted into a ``CLUSTER_RESIZE`` action
and queued for execution. The API returns the cluster properties and the UUID
of the action at this moment.

When executing the action, Senlin will analyze the request parameters and
determine the operations to be performed to meet the user's requirements. The
corresponding cluster properties are updated before the resize operation is
started.


Scaling in/out a Cluster
------------------------

As a convenience, Senlin provides the ``scale_out`` and the ``scale_in``
action APIs for clusters. With these two APIs, a user can request a cluster to
be resized by the specified number of nodes.

The ``scale_out`` and the ``scale_in`` APIs both take a parameter named
``count``, a positive integer. The parameter is optional; when provided, it
specifies the number of nodes to be added or removed. When it is omitted from
the request JSON body, the Senlin engine will check if the cluster has any
relevant policies attached that will decide the number of nodes to be added or
removed, respectively. The Senlin engine will use the outputs from these
policies as the number of nodes to create (or delete) if such policies exist.
When the request does contain a ``count`` parameter and there are policies
governing the scaling arguments, the ``count`` parameter value may be
overridden/ignored.

When a ``scale_out`` or a ``scale_in`` request is received by the Senlin
engine, a ``CLUSTER_SCALE_OUT`` or a ``CLUSTER_SCALE_IN`` action is created
and queued for execution after some validation of the parameter value.

A worker thread picks up the action and executes it. The worker will check if
there are outputs from policy checking. For a ``CLUSTER_SCALE_OUT`` action,
the worker checks whether the policies checked have left a ``count`` key in
the dictionary named ``creation`` in the action's runtime ``data`` attribute;
the worker will use that ``count`` value for node creation. For a
``CLUSTER_SCALE_IN`` action, the worker checks whether the policies checked
have left a ``count`` key in the dictionary named ``deletion`` in the action's
runtime ``data`` attribute; the worker will use that ``count`` value for node
deletion.

Note that both ``scale_out`` and ``scale_in`` actions will adjust the
``desired_capacity`` property of the target cluster.
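Following the single-top-level-key convention described earlier, a scale-out
request asking for two more nodes might carry a body like the following
(values hypothetical):

::

    {
        "scale_out": {
            "count": 2
        }
    }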
Cluster Policy Bindings
~~~~~~~~~~~~~~~~~~~~~~~

The Senlin API provides the following action APIs for managing the binding
relationship between a cluster and a policy:

- ``policy_attach``: attach a policy to a cluster;
- ``policy_detach``: detach a policy from a cluster;
- ``policy_update``: update the properties of the binding between a cluster
  and a policy.


Attaching a Policy to a Cluster
-------------------------------

Once a policy is attached (bound) to a cluster, it will be enforced when
related actions are performed on that cluster, unless the policy is
(temporarily) disabled on the cluster.

When attaching a policy to a cluster, the following property can be specified:

- ``enabled``: a boolean indicating whether the policy should be enabled on
  the cluster once attached. The default is True. When specified, it will
  override the default setting of the policy.

Upon receiving the ``policy_attach`` request, the Senlin engine will perform
some validations, then translate the request into a ``CLUSTER_ATTACH_POLICY``
action and queue the action for execution. The action's UUID is then returned
to the Senlin API and finally to the requester.

When the engine executes the action, it will try to find out whether the
policy is already attached to the cluster. This check is not done earlier
because the engine must ensure that the cluster has been locked before the
check, or else there might be race conditions.

The engine calls the policy's ``attach`` method when attaching the policy and
records the binding in the database if the ``attach`` method returns a
positive response.

Currently, Senlin does not allow two policies of the same type to be attached
to the same cluster. This constraint may be relaxed in the future, but for
now, it is checked and enforced before a policy gets attached to a cluster.

Policies attached to a cluster are cached at the target cluster as part of its
runtime ``rt`` data structure. This is an optimization regarding DB queries.


Detaching a Policy from a Cluster
---------------------------------

Once a policy is attached to a cluster, it can be detached from the cluster at
the user's request. The only parameter required for the ``policy_detach``
action API is ``policy_id``, which can be the UUID, the name or the short ID
of the policy.

Upon receiving a ``policy_detach`` request, the Senlin engine will perform
some validations, then translate the request into a ``CLUSTER_DETACH_POLICY``
action and queue the action for execution. The action's UUID is then returned
to the Senlin API and finally to the requester.

When the Senlin engine executes the ``CLUSTER_DETACH_POLICY`` action, it will
try to find out whether the policy is actually attached to the cluster. This
check is not done earlier because the engine must ensure that the cluster has
been locked before the check, or else there might be race conditions.

The engine calls the policy's ``detach`` method when detaching the policy from
the cluster and then removes the binding record from the database if the
``detach`` method returns a True value.

Policies attached to a cluster are cached at the target cluster as part of its
runtime ``rt`` data structure. This is an optimization regarding DB queries.
The ``CLUSTER_DETACH_POLICY`` action will invalidate the cache when detaching
a policy from a cluster.


Updating a Policy on a Cluster
------------------------------

When a policy is attached to a cluster, there are some properties pertaining
to the binding.
These properties can be updated as long as the policy is still attached to the
cluster. The properties that can be updated include:

- ``enabled``: a boolean value indicating whether the policy should be enabled
  or disabled. There are cases where some policies have to be temporarily
  disabled while manual operations are going on.

Upon receiving the ``policy_update`` request, the Senlin API performs some
basic validations on the parameters passed.

The Senlin engine translates the ``policy_update`` request into a
``CLUSTER_UPDATE_POLICY`` action and queues it for execution. The UUID of the
action is then returned to the Senlin API and eventually to the requester.

During execution of the ``CLUSTER_UPDATE_POLICY`` action, the Senlin engine
simply updates the binding record in the database and returns.
diff --git a/doc/source/contributor/event_dispatcher.rst b/doc/source/contributor/event_dispatcher.rst
deleted file mode 100644
index b362bb0b7..000000000
--- a/doc/source/contributor/event_dispatcher.rst
+++ /dev/null
@@ -1,125 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


=================
Event Dispatchers
=================

An event :term:`dispatcher` is a processor that converts a given action in the
Senlin engine into a certain format and then persists it into some storage, or
sends it to downstream processing software.

Since version 3.0.0, Senlin comes with some built-in dispatchers that can dump
event records into the database and/or send event notifications via the
default message queue. The former is referred to as the ``database``
dispatcher, which is enabled by default; the latter is referred to as the
``message`` dispatcher, which has to be manually enabled by adding the
following line to the ``senlin.conf`` file::

    event_dispatchers = message

However, distributors or users can always add their own event dispatchers
easily when needed.

Event dispatchers are managed as Senlin plugins. Once a new event dispatcher
is implemented, a deployer can enable it by first adding a new item to the
``senlin.dispatchers`` entries in the ``entry_points`` section of the
``setup.cfg`` file, followed by a reinstall of the Senlin service, i.e.
running the ``sudo pip install`` command.


The Base Class ``EventBackend``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

All event dispatchers are expected to subclass the base class ``EventBackend``
in the ``senlin.events.base`` module. The only requirement for a dispatcher
subclass is to override the ``dump()`` method that implements the processing
logic.


Providing New Dispatchers
~~~~~~~~~~~~~~~~~~~~~~~~~

Developing A New Event Dispatcher
---------------------------------

The first step for adding a new dispatcher is to create a new file containing
a subclass of ``EventBackend``. In this new class, say ``JsonDispatcher``,
you will need to implement the ``dump()`` class method as exemplified below:

.. code-block:: python

    class JsonDispatcher(base.EventBackend):
        """Dispatcher for dumping events to a JSON file."""

        @classmethod
        def dump(cls, level, action, **kwargs):
            # Your logic goes here
            ...

The ``level`` parameter for the method is identical to that defined by the
``logging`` module of Python. It is an integer representing the criticality
of an event. The ``action`` parameter is an instance of the Senlin action
class, which is defined in the ``senlin.engine.actions.base`` module. There
are virtually no constraints on which properties you will pick and how you
want to process them.

Finally, the ``**kwargs`` parameter may provide some useful fields for you
to use:

* ``timestamp``: a datetime value that indicates when the event was generated.
* ``phase``: a string value indicating the phase an action is in. Most of the
  time this can be safely ignored.
* ``reason``: there are some rare cases where an event comes with a textual
  description. Most of the time, this is empty.
* ``extra``: there are even rarer cases where an event comes with some
  additional fields for attention. This can be safely ignored most of the
  time.


Registering the New Dispatcher
------------------------------

For the Senlin service to be aware of, and thus make use of, the new
dispatcher, you will need to register it with the Senlin engine service. This
is done by editing the ``setup.cfg`` file in the root directory of the code
base, for example (the module path for the ``jsonfile`` entry below is a
placeholder to be replaced with your own):

::

    [entry_points]
    senlin.dispatchers =
        database = senlin.events.database:DBEvent
        message = senlin.events.message:MessageEvent
        jsonfile = <path.to.your.module>:JsonDispatcher

Finally, save that file and do a reinstall of the Senlin service, followed
by a restart of the ``senlin-engine`` process.

::

    $ sudo pip install -e .


Dynamically Enabling/Disabling a Dispatcher
-------------------------------------------

All dispatchers are loaded when the Senlin engine is started; however, they
can be dynamically enabled or disabled by editing the ``senlin.conf`` file.
The option ``event_dispatchers`` in the ``[DEFAULT]`` section is a
multi-string value option for this purpose. To enable your dispatcher (i.e.
``jsonfile``), you will need to add the following line to the ``senlin.conf``
file:

::

    event_dispatchers = jsonfile
diff --git a/doc/source/contributor/node.rst b/doc/source/contributor/node.rst
deleted file mode 100644
index cc8d52a73..000000000
--- a/doc/source/contributor/node.rst
+++ /dev/null
@@ -1,202 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.

=====
Nodes
=====

A node is a logical entity managed by the Senlin service. Each node can belong
to at most one cluster. A node that does not belong to any cluster can be
referred to as an "orphan" node.


Node Properties
~~~~~~~~~~~~~~~

There are some common properties that are defined for all nodes. The following
properties are always available on a node:

- ``profile_id``: ID of the profile from which the node is created.
- ``cluster_id``: When a node is a member of a cluster, the ``cluster_id``
  value indicates the ID of the owning cluster. For an orphan node, this
  property is empty.
- ``name``: The name of a node doesn't have to be unique, even in the scope of
  the owning cluster (if there is one). For nodes created by the Senlin
  service upon policy enforcement or when performing certain actions, the
  Senlin engine will generate names for them automatically.
- ``index``: Each node has an ``index`` value which is unique in the scope of
  its owning cluster. The value is used to uniquely identify the node inside
  a cluster. For orphan nodes, the ``index`` value will be -1.
- ``role``: Each node in a cluster may have a role to play. The value of this
  property is a string that specifies the role a node plays in the owning
  cluster. Each profile type may support a different set of roles.
- ``user``: ID of the user who is the creator (owner) of the node.
- ``project``: ID of the Keystone project in which the node was created.
- ``domain``: ID of the Keystone domain in which the node was created.
- ``init_at``: The timestamp when the node object was initialized.
- ``created_at``: The timestamp when the node was created.
- ``updated_at``: The timestamp when the node was last updated.
- ``metadata``: A list of key-value pairs that are associated with the node.
- ``physical_id``: The UUID of the physical object that backs this node. The
  property value is empty if there is no physical object associated with it.
- ``status``: A string indicating the current status of the node.
- ``status_reason``: A string describing the reason why the node transitioned
  to its current status.
- ``dependents``: A dict containing dependency information between a nova
  server/heat stack node and a container node. The container node's ID will
  be stored in the 'dependents' property of its host node.

In addition to the above properties, when a node is retrieved and shown to the
user, Senlin provides a pseudo-property named ``profile_name`` for the user's
convenience.


Cluster Membership
~~~~~~~~~~~~~~~~~~

A prerequisite for a node to become a member of a cluster is that the node
must share the same profile type with the cluster. When adding nodes to an
existing cluster, the Senlin engine will check if the profile types actually
match.

It is *NOT* treated as an error that a node has a different profile
(identified by the profile object's ID) from the cluster. The profile
referenced by the cluster can be interpreted as the 'desired' profile, while
the profiles referenced by individual nodes can be treated as the 'actual'
profile(s). When the cluster scales out, new nodes will use the 'desired'
profile referenced by the cluster. When existing nodes are added to an
existing cluster, the existing nodes may have different profile IDs from the
cluster. In this case, Senlin will not force an unnecessary profile update on
the nodes.


Creating A Node
~~~~~~~~~~~~~~~

When receiving a request to create a node, the Senlin API checks whether any
required fields are missing and whether invalid values are specified for some
fields. The following fields are required for a node creation request:

- ``name``: Name of the node to be created;
- ``profile_id``: ID of the profile to be used for creating the backend
  physical object.

Optionally, the request can contain the following fields:

- ``cluster_id``: When specified, the newly created node will become a
  member of the specified cluster.
  Otherwise, the new node will be an orphan node. The ``cluster_id`` provided
  can be the name, the UUID or the short ID of a cluster.
- ``role``: A string value specifying the role the node will play inside the
  cluster.
- ``metadata``: A list of key-value pairs to be associated with the node.


Listing Nodes
~~~~~~~~~~~~~

Nodes in the current project can be queried/listed using some query
parameters. None of these parameters is required. By default, the Senlin API
will return all nodes that are not deleted.

When listing nodes, the following query parameters can be specified,
individually or combined:

- ``filters``: a map containing key-value pairs that will be used for matching
  node records. Records that fail to match these criteria will be filtered
  out. The following strings are valid as filter keys:

  * ``name``: name of nodes to list, can be a string or a list of strings;
  * ``status``: status of nodes, can be a string or a list of strings;

- ``cluster_id``: a string specifying the name, the UUID or the short ID of a
  cluster for which the nodes are to be listed.
- ``limit``: a number that restricts the maximum number of records to be
  returned from the query. It is useful for displaying the records in pages
  where the page size can be specified as the limit.
- ``marker``: a string that represents the last seen UUID of nodes in previous
  queries. This query will only return results appearing after the specified
  UUID. This is useful for displaying records in pages.
- ``sort``: a string to enforce sorting of the results. It accepts a list of
  known property names of a node as sorting keys separated by commas. Each
  sorting key can optionally have either ``:asc`` or ``:desc`` appended to the
  key for controlling the sorting direction.
- ``show_nested``: a boolean indicating whether nested clusters should be
  included in the results. The default is True. This feature is yet to be
  supported.
- ``global_project``: a boolean indicating whether node listing should be done
  in a tenant-safe way. When this value is specified as False (the default),
  only nodes from the current project that match the other criteria will be
  returned. When this value is specified as True, nodes matching all other
  criteria will be returned, regardless of the project in which they were
  created. Only a user with admin privilege is permitted to do a global
  listing.


Getting a Node
~~~~~~~~~~~~~~

When a user wants to check the details of a specific node, he or she can
specify one of the following values for the query:

- Node UUID: the query is performed strictly based on the UUID value given.
  This is the most precise query supported.
- Node name: Senlin allows multiple nodes to have the same name. It is the
  user's responsibility to avoid name conflicts if needed. The output is the
  details of a node if the node name is unique; otherwise, Senlin will return
  a message telling users that multiple nodes were found matching this name.
- short ID: considering that a UUID is a long string that is not convenient to
  input, Senlin supports a short version of UUIDs for queries. The Senlin
  engine will use the provided string as a prefix to attempt a match in the
  database. When the "ID" is long enough to be unique, the details of the
  matching node are returned; otherwise, Senlin will return an error message
  indicating that multiple nodes were found matching the specified short ID.
The Senlin engine will try the above three ways in order to find a match in
the database.

In addition to the key for the query, a user can provide an extra boolean
option named ``show_details``. When this option is set, the Senlin service
will retrieve the properties of the physical object that backs the node. For
example, for a Nova server, this information will contain the IP address
allocated to the server, along with other useful information.

In the returned result, Senlin injects the name of the profile used by the
node for the user's convenience.


Updating a Node
~~~~~~~~~~~~~~~

Some node properties are updatable after the node has been created. These
properties include:

- ``name``: Name of the node as seen by the user;
- ``role``: The role that is played by the node in its owning cluster;
- ``metadata``: The key-value pairs attached to the node;
- ``profile_id``: The ID of the profile used by the node.

Note that updating ``profile_id`` differs from updating the other properties
in that it may take time to complete. When receiving a request to update the
profile used by a node, the Senlin engine creates an Action that is executed
asynchronously by a worker thread.

When validating a node update request, Senlin rejects requests that attempt
to change the profile type used by the node.


Deleting a Node
~~~~~~~~~~~~~~~

A node can be deleted whether or not it is a member of a cluster. Node
deletion is handled asynchronously in Senlin. When the Senlin engine receives
a request, it will create an Action to be executed by a worker thread.
diff --git a/doc/source/contributor/osprofiler.rst b/doc/source/contributor/osprofiler.rst
deleted file mode 100644
index e5a67143a..000000000
--- a/doc/source/contributor/osprofiler.rst
+++ /dev/null
@@ -1,68 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.

==========
OSProfiler
==========

OSProfiler provides a tiny but powerful library that is used by most (soon to
be all) OpenStack projects and their python clients. It provides the
functionality to generate one trace per request that goes through all involved
services. This trace can then be extracted and used to build a tree of calls,
which can be quite handy for a variety of reasons (for example, isolating
cross-project performance issues).

More about OSProfiler:
https://docs.openstack.org/osprofiler/latest/

Senlin supports using OSProfiler to trace the performance of each key internal
processing step, including RESTful APIs, RPC, cluster actions, node actions,
DB operations, etc.

Enabling OSProfiler
~~~~~~~~~~~~~~~~~~~

To configure DevStack to enable OSProfiler, edit the
``${DEVSTACK_DIR}/local.conf`` file and add::

    enable_plugin panko https://git.openstack.org/openstack/panko
    enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer
    enable_plugin osprofiler https://git.openstack.org/openstack/osprofiler

to the ``[[local|localrc]]`` section.

.. note:: The order of enabling plugins matters.
Using OSProfiler
~~~~~~~~~~~~~~~~

After successfully deploying your development environment, the following
profiler configuration will be added to ``senlin.conf`` automatically::

    [profiler]
    enabled = true
    trace_sqlalchemy = true
    hmac_keys = SECRET_KEY

``hmac_keys`` is the secret key(s) used for encrypting context data for
performance profiling. The default value is 'SECRET_KEY'; you can modify it
to any random string(s).

Run any command with ``--os-profile SECRET_KEY``::

    $ openstack --os-profile SECRET_KEY cluster profile list
    # it will print the trace ID

Get pretty HTML with traces::

    $ osprofiler trace show --html <trace_id>
diff --git a/doc/source/contributor/plugin_guide.rst b/doc/source/contributor/plugin_guide.rst
deleted file mode 100644
index dfaa969d9..000000000
--- a/doc/source/contributor/plugin_guide.rst
+++ /dev/null
@@ -1,29 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


=====================
Plugin Writer's Guide
=====================

Senlin provides an open design where developers can incorporate new profile
or policy implementations for different purposes. The following documents
describe how to develop and plug in your own profile types and/or policy
types.


.. toctree::
   :maxdepth: 1

   policy_type
   profile_type
   event_dispatcher
diff --git a/doc/source/contributor/policies/affinity_v1.rst b/doc/source/contributor/policies/affinity_v1.rst
deleted file mode 100644
index 71fa758e1..000000000
--- a/doc/source/contributor/policies/affinity_v1.rst
+++ /dev/null
@@ -1,231 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


====================
Affinity Policy V1.0
====================

This policy is designed for Senlin clusters to exploit the *servergroup* API
exposed by the Nova compute service. The basic policy has been extended to
work with the vSphere hypervisor when the VMware DRS feature is enabled.
However, such an extension is only applicable to *admin*-owned server
clusters.

.. schemaspec::
    :package: senlin.policies.affinity_policy.AffinityPolicy


Actions Handled
~~~~~~~~~~~~~~~

The policy is capable of handling the following actions:

- ``CLUSTER_SCALE_OUT``: an action that carries an optional integer value
  named ``count`` in its ``inputs``.

- ``CLUSTER_RESIZE``: an action that carries various input parameters to
  resize a cluster. The policy will try to parse the raw inputs if no other
  policies have done so.

- ``NODE_CREATE``: an action originating from a node creation RPC request.
-  The policy is capable of processing the node associated with this action.
-
-The policy will be checked **BEFORE** any of the above mentioned actions is
-executed. When the action is ``CLUSTER_RESIZE``, the affinity policy will
-check if it is about the creation of new nodes. If the resize request is
-about the removal of existing nodes, the policy won't block the request.
-
-The Senlin engine respects outputs (i.e. the number of nodes to create) from
-other policies, if any. If no such data exists, it then checks the
-user-provided "``count``" input if there is one. The policy is also designed
-to parse a cluster resize request and see if there are new nodes to be
-created.
-
-After validating the ``count`` value, the affinity policy proceeds to update
-the ``data`` property of the action with node placement data. For example:
-
-::
-
-  {
-    'placement': {
-      'count': 2,
-      'placements': [
-        {'servergroup': 'XYZ-ABCD'},
-        {'servergroup': 'XYZ-ABCD'}
-      ]
-    }
-  }
-
-
-Scenarios
-~~~~~~~~~
-
-S1: Inheriting Server Group from Profile
-----------------------------------------
-
-When attaching the affinity policy to a cluster that is based on a profile
-type of ``os.nova.server-1.0``, if the profile contains a ``scheduler_hints``
-property and the property value (a collection) has a ``group`` key, the
-engine will use the value of the ``group`` key as a Nova server group name.
-In this case, the affinity policy will check if the specified server group
-does exist. If the group doesn't exist, or the rules specified in the group
-don't match those specified (or implied) by the affinity policy, you will get
-an error when attaching the policy to the cluster. If, on the contrary, the
-group is found and the rules do match those of the current policy, the engine
-will record the ID of the server group into the policy binding data. The
-engine also saves an ``inherited_group: True`` key-value pair into the policy
-binding data, so that in the future the engine knows that the server group
-wasn't created from scratch by the affinity policy. This will lead to the
-following data stored in the policy binding data:
-
-::
-
-  {
-    'AffinityPolicy': {
-      'version': 1.0,
-      'data': {
-        'servergroup_id': 'XYZ-ABCD',
-        'inherited_group': True
-      }
-    }
-  }
-
-When an affinity policy is to be detached from a cluster, the Senlin engine
-will check and learn that the server group was not created by the affinity
-policy. The engine will not delete the server group.
-
-Before any of the targeted actions is executed, the affinity policy gets a
-chance to be checked. It does so by looking into the policy binding data and
-finding out the server group ID to use. For node creation requests, the
-policy will yield some data into the ``action.data`` property that looks
-like:
-
-::
-
-  {
-    'placement': {
-      'count': 2,
-      'placements': [
-        {'servergroup': 'XYZ-ABCD'},
-        {'servergroup': 'XYZ-ABCD'}
-      ]
-    }
-  }
-
-
-S2: Creating A Server Group when Needed
----------------------------------------
-
-When attaching an affinity policy to a cluster, if the cluster profile
-doesn't contain a ``scheduler_hints`` property or there is no ``group`` value
-specified in the ``scheduler_hints`` property, the engine will create a new
-server group by invoking the Nova API, providing it the policies specified
-(or implied) as inputs. The ID of the newly created server group is then
-saved into the policy binding data, along with an ``inherited_group: False``
-key-value pair.
-For example:
-
-::
-
-  {
-    'AffinityPolicy': {
-      'version': 1.0,
-      'data': {
-        'servergroup_id': 'XYZ-ABCD',
-        'inherited_group': False
-      }
-    }
-  }
-
-When such a policy is later detached from the cluster, the Senlin engine will
-check and learn that the server group should be deleted. It then deletes the
-server group by invoking the Nova API.
-
-When the targeted actions are about to be executed, the protocol for checking
-and data saving is identical to that outlined in scenario *S1*.
-
-
-S3: Enabling vSphere DRS Extensions
------------------------------------
-
-When you have vSphere hosts (with the DRS feature enabled) serving
-hypervisors to Nova, a vSphere host is itself a collection of physical nodes.
-To make better use of the vSphere DRS feature, you can enable the DRS
-extension by specifying ``enable_drs_extension: True`` in your affinity
-policy.
-
-When attaching and detaching the affinity policy to/from a cluster, the
-engine operations are the same as described in scenarios *S1* and *S2*.
-However, when one of the targeted actions is triggered, the affinity policy
-will first check if the ``availability_zone`` property is set and it will use
-"``nova``" as the default value if not specified.
-
-The engine then continues to check the input parameters (as outlined above)
-to find out the number of nodes to create. It also checks the server group ID
-to use by looking into the policy binding data.
-
-After the policy has collected all inputs it needs, it proceeds to check the
-available vSphere hypervisors with DRS enabled. It does so by looking into
-the ``hypervisor_hostname`` property of each hypervisor reported by Nova
-(**Note**: retrieving the hypervisor list is an admin-only API, and that is
-the reason the vSphere extension is only applicable to admin-owned clusters).
-The policy attempts to find a hypervisor whose host name contains ``drs``. If
-it fails to find such a hypervisor, the policy check fails with the action's
-``data`` field set to:
-
-::
-
-  {
-    'status': 'ERROR',
-    'status_reason': 'No suitable vSphere host is available.'
-  }
-
-The affinity policy uses the first matching hypervisor as the target host and
-forms a string containing the availability zone name and the hypervisor host
-name, e.g. "``nova:vsphere_drs_1``". This string will later be used as the
-availability zone name sent to Nova. For example, the following is a sample
-result when applying the affinity policy to a cluster with vSphere DRS
-enabled:
-
-::
-
-  {
-    'placement': {
-      'count': 2,
-      'placements': [{
-          'zone': 'nova:vsphere_drs_1',
-          'servergroup': 'XYZ-ABCD'
-        }, {
-          'zone': 'nova:vsphere_drs_1',
-          'servergroup': 'XYZ-ABCD'
-        }
-      ]
-    }
-  }
-
-**NOTE**: The ``availability_zone`` property is effective even when the
-vSphere DRS extension is not enabled. When ``availability_zone`` is
-explicitly specified, the affinity policy will pass it along with the server
-group ID to the Senlin engine for further processing, e.g.:
-
-::
-
-  {
-    'placement': {
-      'count': 2,
-      'placements': [{
-          'zone': 'nova_1',
-          'servergroup': 'XYZ-ABCD'
-        }, {
-          'zone': 'nova_1',
-          'servergroup': 'XYZ-ABCD'
-        }
-      ]
-    }
-  }
diff --git a/doc/source/contributor/policies/deletion_v1.rst b/doc/source/contributor/policies/deletion_v1.rst
deleted file mode 100644
index 46f4629fd..000000000
--- a/doc/source/contributor/policies/deletion_v1.rst
+++ /dev/null
@@ -1,272 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-====================
-Deletion Policy V1.1
-====================
-
-The deletion policy is designed to be enforced when a cluster's size is to be
-shrunk.
-
-.. schemaspec::
-   :package: senlin.policies.deletion_policy.DeletionPolicy
-
-
-Actions Handled
-~~~~~~~~~~~~~~~
-
-The policy is capable of handling the following actions:
-
-- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value
-  named ``count`` in its ``inputs``.
-
-- ``CLUSTER_DEL_NODES``: an action that carries a list value named
-  ``candidates`` in its ``inputs`` value.
-
-- ``CLUSTER_RESIZE``: an action that carries various key-value pairs as
-  arguments to the action in its ``inputs`` value.
-
-- ``NODE_DELETE``: an action that has a node associated with it. This action
-  has to originate directly from an RPC request so that it will be processed
-  by the deletion policy. The node ID associated with the action obviously
-  becomes the 'candidate' node for deletion.
-
-The policy will be checked **BEFORE** any of the above mentioned actions is
-executed.
-
-
-Scenarios
-~~~~~~~~~
-
-Under different scenarios, the policy works by checking different properties
-of the action.
-
-
-S1: ``CLUSTER_DEL_NODES``
--------------------------
-
-This is the simplest case. An action of ``CLUSTER_DEL_NODES`` carries a list
-of UUIDs for the nodes to be removed from the cluster. The deletion policy
-steps in before the actual deletion happens so as to help determine the
-following details:
-
-- whether the nodes should be destroyed after being removed from the cluster;
-- whether the nodes should be granted a grace period before being destroyed;
-- whether the ``desired_capacity`` of the cluster in question should be
-  reduced after node removal.
-
-After the policy check, the ``data`` field is updated with contents similar
-to the following example:
-
-::
-
-  {
-    "status": "OK",
-    "reason": "Candidates generated",
-    "deletion": {
-      "count": 2,
-      "candidates": ["<node-id-1>", "<node-id-2>"],
-      "destroy_after_deletion": true,
-      "grace_period": 0
-    }
-  }
-
-
-S2: ``CLUSTER_SCALE_IN`` without Scaling Policy
------------------------------------------------
-
-If there is no :doc:`scaling policy <scaling_v1>` attached to the cluster,
-Senlin engine takes the liberty to assume that the expectation is to remove
-1 node from the cluster. This is equivalent to the case when ``count`` is
-specified as ``1``.
-
-The policy then continues to evaluate the cluster nodes to select ``count``
-victim node(s) based on the ``criteria`` property of the policy. Finally it
-updates the action's ``data`` field with the list of node candidates along
-with other properties, as described in scenario **S1**.
-
-
-S3: ``CLUSTER_SCALE_IN`` with Scaling Policy
---------------------------------------------
-
-If there is a :doc:`scaling policy <scaling_v1>` attached to the cluster,
-that policy will yield into the action's ``data`` property some contents
-similar to the following example:
-
-::
-
-  {
-    "deletion": {
-      "count": 2
-    }
-  }
-
-The Senlin engine will use the value from the ``deletion.count`` field in the
-``data`` property as the number of nodes to remove from the cluster. It
-selects victim nodes from the cluster based on the ``criteria`` specified and
-then updates the action's ``data`` property along with other properties, as
-described in scenario **S1**.
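The ``criteria``-based victim selection described in the scenarios above can
be pictured with a simplified sketch like the one below. This is illustrative
only, not the engine's actual code; it assumes node objects that carry a
``created_at`` timestamp and covers only the ``RANDOM``, ``OLDEST_FIRST`` and
``YOUNGEST_FIRST`` criteria::

  import random

  def select_victims(nodes, count, criteria="OLDEST_FIRST"):
      """Pick ``count`` victim nodes from ``nodes`` per ``criteria``."""
      if criteria == "RANDOM":
          return random.sample(nodes, count)
      # OLDEST_FIRST ranks by ascending creation time; YOUNGEST_FIRST
      # reverses the ordering.
      reverse = (criteria == "YOUNGEST_FIRST")
      ranked = sorted(nodes, key=lambda n: n.created_at, reverse=reverse)
      return ranked[:count]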
-
-
-S4: ``CLUSTER_RESIZE`` without Scaling Policy
----------------------------------------------
-
-If there is no :doc:`scaling policy <scaling_v1>` attached to the cluster,
-the deletion policy won't be able to find a ``deletion.count`` field in the
-action's ``data`` property. It then checks the ``inputs`` property of the
-action directly and generates a ``deletion.count`` field if the request turns
-out to be a scaling-in operation. If the request is not a scaling-in
-operation, the policy check aborts immediately.
-
-After having determined the number of nodes to remove, the policy proceeds to
-select victim nodes based on its ``criteria`` property value. Finally it
-updates the action's ``data`` field with the list of node candidates along
-with other properties, as described in scenario **S1**.
-
-
-S5: ``CLUSTER_RESIZE`` with Scaling Policy
-------------------------------------------
-
-In the case where there is already a :doc:`scaling policy <scaling_v1>`
-attached to the cluster, the scaling policy will be evaluated before the
-deletion policy, so the deletion policy works in the same way as described in
-scenario **S3**.
-
-
-S6: Deletion across Multiple Availability Zones
------------------------------------------------
-
-When you have a :doc:`zone placement policy <zone_v1>` attached to a cluster,
-the zone placement policy will decide in which availability zone(s) new nodes
-will be placed and from which availability zone(s) old nodes should be
-deleted to maintain an expected node distribution. Such a zone placement
-policy will be evaluated before this deletion policy, according to its
-builtin priority value.
-
-When scaling in a cluster, a zone placement policy yields a decision into the
-action's ``data`` property that looks like:
-
-::
-
-  {
-    "deletion": {
-      "count": 3,
-      "zones": {
-        "AZ-1": 2,
-        "AZ-2": 1
-      }
-    }
-  }
-
-The above data indicates how many nodes should be deleted globally and how
-many nodes should be removed from each availability zone. The deletion policy
-then evaluates nodes from each availability zone to select the specified
-number of nodes as candidates. This selection process is also based on the
-``criteria`` property of the deletion policy.
-
-After the evaluation, the deletion policy completes by modifying the ``data``
-property to something like:
-
-::
-
-  {
-    "status": "OK",
-    "reason": "Candidates generated",
-    "deletion": {
-      "count": 3,
-      "candidates": ["node-id-1", "node-id-2", "node-id-3"],
-      "destroy_after_deletion": true,
-      "grace_period": 0
-    }
-  }
-
-In the ``deletion.candidates`` list, two of the nodes are from availability
-zone ``AZ-1`` and one is from availability zone ``AZ-2``.
-
-
-S7: Deletion across Multiple Regions
-------------------------------------
-
-When you have a :doc:`region placement policy <region_v1>` attached to a
-cluster, the region placement policy will decide in which region(s) new nodes
-will be placed and from which region(s) old nodes should be deleted to
-maintain an expected node distribution. Such a region placement policy will
-be evaluated before this deletion policy, according to its builtin priority
-value.
-
-When scaling in a cluster, a region placement policy yields a decision into
-the action's ``data`` property that looks like:
-
-::
-
-  {
-    "deletion": {
-      "count": 3,
-      "region": {
-        "R-1": 2,
-        "R-2": 1
-      }
    }
-  }
-
-The above data indicates how many nodes should be deleted globally and how
-many nodes should be removed from each region.
-The deletion policy then evaluates nodes from each region to select the
-specified number of nodes as candidates. This selection process is also based
-on the ``criteria`` property of the deletion policy.
-
-After the evaluation, the deletion policy completes by modifying the ``data``
-property to something like:
-
-::
-
-  {
-    "status": "OK",
-    "reason": "Candidates generated",
-    "deletion": {
-      "count": 3,
-      "candidates": ["node-id-1", "node-id-2", "node-id-3"],
-      "destroy_after_deletion": true,
-      "grace_period": 0
-    }
-  }
-
-In the ``deletion.candidates`` list, two of the nodes are from region ``R-1``
-and one is from region ``R-2``.
-
-
-S8: Handling ``NODE_DELETE`` Action
------------------------------------
-
-If the action that triggered the policy checking is a ``NODE_DELETE`` action,
-the action has an associated node as its property. When the deletion policy
-has detected this action type, it will copy the policy specification values
-into the action's ``data`` field, even though the ``count`` and
-``candidates`` values are obvious. For example:
-
-::
-
-  {
-    "status": "OK",
-    "reason": "Candidates generated",
-    "deletion": {
-      "count": 1,
-      "candidates": ["node-id-1"],
-      "destroy_after_deletion": true,
-      "grace_period": 0
-    }
-  }
diff --git a/doc/source/contributor/policies/health_v1.rst b/doc/source/contributor/policies/health_v1.rst
deleted file mode 100644
index 64516a038..000000000
--- a/doc/source/contributor/policies/health_v1.rst
+++ /dev/null
@@ -1,373 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-==================
-Health Policy V1.1
-==================
-
-The health policy is designed to automate the failure detection and recovery
-process for a cluster.
-
-.. schemaspec::
-   :package: senlin.policies.health_policy.HealthPolicy
-
-
-Actions Handled
-~~~~~~~~~~~~~~~
-
-The policy is capable of handling the following actions:
-
-- ``CLUSTER_RECOVER``: an action that carries some optional parameters as its
-  inputs. The parameters are specific to the profile type of the target
-  cluster.
-
-- ``CLUSTER_DEL_NODES``: an action that carries a list value named
-  ``candidates`` in its ``inputs`` value.
-
-- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value
-  named ``count`` in its ``inputs`` value.
-
-- ``CLUSTER_RESIZE``: an action that carries various key-value pairs as
-  arguments to the action in its ``inputs`` value.
-
-- ``NODE_DELETE``: an action that has a node associated with it. This action
-  has to originate directly from an RPC request so that it will be processed
-  by the health policy.
-
-The policy will be checked **BEFORE** a ``CLUSTER_RECOVER`` action is
-executed. It will derive the appropriate inputs to the action based on the
-policy's properties.
-
-The policy will be checked **BEFORE** and **AFTER** any one of the
-``CLUSTER_DEL_NODES``, ``CLUSTER_SCALE_IN``, ``CLUSTER_RESIZE`` and
-``NODE_DELETE`` actions is executed.
-When any of these actions originates from an RPC request, Senlin knows that
-the cluster is losing node member(s) because of a normal cluster membership
-management operation initiated by users, rather than because of unexpected
-node failures. The health policy will temporarily disable the *health
-manager* function on the cluster in question and re-enable the health
-management after the action has completed.
-
-The health policy can be treated as an interface for the *health manager*
-engine running inside the ``senlin-engine`` process. Its specification
-contains two main "sections", ``detection`` and ``recovery``, which specify
-how to detect node failures and how to recover a node to a healthy status,
-respectively.
-
-
-Failure Detection
-~~~~~~~~~~~~~~~~~
-
-The health policy is designed to be flexible regarding node failure
-detection. The current vision is that the health policy will support the
-following types of failure detection:
-
-* ``NODE_STATUS_POLLING``: the *health manager* periodically polls a cluster
-  and checks whether any nodes are inactive.
-
-* ``NODE_STATUS_POLL_URL``: the *health manager* periodically polls a URL
-  and checks if a node is considered healthy based on the response.
-
-* ``LIFECYCLE_EVENTS``: the *health manager* listens to event notifications
-  sent by the backend service (e.g. nova-compute).
-
-* ``LB_STATUS_POLLING``: the *health manager* periodically polls the load
-  balancer (if any) and sees if any node has gone offline.
-
-The last option above (``LB_STATUS_POLLING``) is not usable yet due to an
-outstanding issue in the LBaaS service. But we are still tracking its
-progress considering that metrics from the load balancer are more trustworthy
-and more useful because they originate from the data plane rather than the
-control plane.
-
-Yet another option regarding load-balancer based health detection is to have
-the load balancer emit event notifications when node status changes. This is
-also ongoing work which may take some time to land.
-
-
-Proactive Node Status Polling
------------------------------
-
-The most straightforward way of node failure detection is to check with the
-backend service about the status of the physical resource represented by a
-node. If the ``type`` of ``detection`` is set to "``NODE_STATUS_POLLING``"
-(optionally, with an ``interval`` value specified), the *health manager* will
-periodically check the resource status by querying the backend service to see
-if the resource is active. Below is a sample configuration::
-
-  type: senlin.policy.health
-  version: 1.1
-  properties:
-    detection:
-      interval: 120
-      detection_modes:
-        - type: NODE_STATUS_POLLING
-  ...
-
-Once such a policy object is attached to a cluster, Senlin registers the
-cluster to the *health manager* engine for failure detection, i.e., node
-health checking. A thread is created to periodically call Nova to check the
-status of the node. If the server status is ERROR, SHUTOFF or DELETED, the
-node is considered unhealthy.
-
-When one of the ``senlin-engine`` services is restarted, a new *health
-manager* engine will be launched. This new engine will check the database to
-see if there are clusters which have health policies attached and thus have
-their health status maintained by a *health manager* that is no longer alive.
-The new *health manager* will pick up these clusters for health management.
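The polling behavior just described can be summarized with the following
simplified sketch. The helper names and the ``recover`` callback are
hypothetical, not Senlin internals; the unhealthy statuses are the ones
listed above::

  import time

  UNHEALTHY_STATUSES = ("ERROR", "SHUTOFF", "DELETED")

  def poll_node_status(nova_client, cluster, recover, interval=120):
      """Periodically match each node's server status against the list."""
      while True:
          for node in cluster.nodes:
              server = nova_client.servers.get(node.physical_id)
              if server.status in UNHEALTHY_STATUSES:
                  recover(node)  # e.g. issue a NODE_RECOVER RPC request
          time.sleep(interval)

In the real engine each registered cluster gets its own polling thread, so a
slow backend query for one cluster does not delay health checks for others.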
-
-
-Polling Node URL
-----------------
-
-The health check for a node can also be configured to periodically query a
-URL with the ``NODE_STATUS_POLL_URL`` detection type. The URL can optionally
-contain expansion parameters. Expansion parameters are strings enclosed in {}
-that will be substituted with the node specific value by Senlin prior to
-querying the URL. The only valid expansion parameter at this point is
-``{nodename}``. This expansion parameter will be replaced with the name of
-the Senlin node. Below is a sample configuration::
-
-  type: senlin.policy.health
-  version: 1.1
-  properties:
-    detection:
-      interval: 120
-      detection_modes:
-        - type: NODE_STATUS_POLL_URL
-          options:
-            poll_url: "http://{nodename}/healthstatus"
-            poll_url_healthy_response: "passing"
-            poll_url_conn_error_as_unhealthy: true
-            poll_url_retry_limit: 3
-            poll_url_retry_interval: 2
-  ...
-
-.. note::
-  ``{nodename}`` can be used to query a URL implemented by an
-  application running on each node. This requires that the OpenStack cloud
-  is set up to automatically register the name of new server instances with
-  the DNS service. In the future, support for a new expansion parameter for
-  node IP addresses may be added.
-
-Once such a policy object is attached to a cluster, Senlin registers the
-cluster to the *health manager* engine for failure detection, i.e., node
-health checking. A thread is created to periodically make a GET request on
-the specified URL. ``poll_url_conn_error_as_unhealthy`` specifies the
-behavior if the URL is unreachable. A node is considered healthy if the
-response to the GET request includes the string specified by
-``poll_url_healthy_response``. If it does not, Senlin will retry the URL
-health check for the number of times specified by ``poll_url_retry_limit``
-while waiting the number of seconds in ``poll_url_retry_interval`` between
-each retry. If the URL response still does not contain the expected string
-after the retries, the node is considered unhealthy.
-
-
-Listening to Event Notifications
---------------------------------
-
-For some profile types (currently ``os.nova.server``), the backend service
-may emit an event notification on the control plane message bus. These events
-offer a more economical way of detecting node failures, assuming that all
-kinds of status changes will be captured and reported by the backend service.
-Actually, we have verified that most lifecycle events related to a VM server
-are already captured and reported by Nova. For other profile types such as
-``os.heat.stack``, such a possibility also exists, although to our knowledge
-Heat cannot detect all stack failures.
-
-Event listening is a cheaper way of node failure detection when compared to
-the status polling approach described above. To instruct the *health manager*
-to listen to event notifications, users can attach to their cluster(s) a
-health policy which looks like the following example::
-
-  type: senlin.policy.health
-  version: 1.1
-  properties:
-    detection:
-      type: LIFECYCLE_EVENTS
-
-  ...
-
-When such a policy is attached to a cluster, Senlin registers the cluster to
-the *health manager* engine for failure detection, i.e., node health
-checking. A listener thread is created to listen to events that indicate a
-certain node has failed. For nova server nodes, the current implementation
-treats all of the following event types as indications of node failure:
-
-* ``compute.instance.pause.end``: A server has been accidentally paused.
-* ``compute.instance.power_off.end``: A server has been stopped accidentally.
-* ``compute.instance.rebuild.error``: A server rebuild has failed.
-* ``compute.instance.shutdown.end``: A server has been shut down for unknown
-  reasons.
-* ``compute.instance.soft_delete.end``: A server has been soft deleted.
-
-When any one of these events is heard by the listener thread, it will issue
-a ``NODE_RECOVER`` RPC request to the senlin-engine service. For the health
-policy to make a smarter decision on the proper recovery operation, the RPC
-request is augmented with some parameters as hints to the recovery operation,
-as exemplified below::
-
-  {
-    "event": "SHUTDOWN",
-    "state": "shutdown",
-    "instance_id": "449ad837-3db2-4aa9-b324-ecd28e14ab14",
-    "timestamp": "2016-11-27T12:10:58Z",
-    "publisher": "nova-compute:node1"
-  }
-
-Ideally, a health management solution can react differently based on the
-different types of failures detected. For example, a server stopped by
-accident can be simply recovered by starting it again; a paused server can be
-unpaused quickly instead of being recreated.
-
-When one of the ``senlin-engine`` services is restarted, a new *health
-manager* engine will be launched. This new engine will check the database to
-see if there are clusters which have health policies attached and thus have
-their health status maintained by a *health manager* that is no longer alive.
-The new *health manager* will pick up these clusters for health management.
-
-
-Recovery Actions
-~~~~~~~~~~~~~~~~
-
-The value of the ``actions`` key under ``recovery`` is modeled as a list,
-each entry of which specifies an action to try. The list of actions is to be
-adjusted by the policy before being passed on to a base ``Profile`` for
-actual execution. An example (imaginary) list of actions is shown below::
-
-  type: senlin.policy.health
-  version: 1.0
-  properties:
-    ...
-    recovery:
-      actions:
-        - name: REBOOT
-          params:
-            type: soft
-        - name: REBUILD
-        - name: my_evacuation_workflow
-          type: MISTRAL_WORKFLOW
-          params:
-            node_id: {{ node.physicalid }}
-
-The above specification basically tells the Senlin engine to try a list of
-recovery actions one by one. The first thing to try is to "reboot" (an
-operation only applicable to a Nova server) the failed node in question. If
-that doesn't solve the problem, the engine is expected to "rebuild" (also a
-Nova server specific verb) the failed node. If this cannot bring the node
-back to a healthy status, the engine should execute a Mistral workflow named
-"``my_evacuation_workflow``" and pass in the physical ID of the node.
-
-The health policy is triggered when a ``CLUSTER_RECOVER`` action is to be
-executed. Using the above example, the policy object will fill in the
-``data`` field of the action object with the following content::
-
-  {
-    "health": {
-      "recover_action": [
-        {
-          "name": "REBOOT",
-          "params": {
-            "type": "soft"
-          }
-        },
-        {
-          "name": "REBUILD"
-        },
-        {
-          "name": "my_evacuation_workflow",
-          "type": "MISTRAL_WORKFLOW",
-          "params": {
-            "node_id": "7a753f4b-417d-4c10-8065-681f60db0c9a"
-          }
-        }
-      ]
-      ...
-    }
-  }
-
-This action customization is eventually passed on to the ``Profile`` base
-class where the actual actions are performed.
-
-**NOTE**: Currently, we only support a single action in the list. Support
-for Mistral workflows is also ongoing work.
-
-
-Default Recovery Action
------------------------
-
-Since Senlin is designed to manage different types of resources, each
-resource type, i.e. :term:`Profile Type`, may support different sets of
-operations that can be used for failure recovery.
-
-A more practical and more general operation to recover a failed resource is
-to delete the old one followed by creating a new one, thus a ``RECREATE``
-operation. Note that although the ``RECREATE`` action is generic enough, it
-may or may not be what users want. For example, there is no guarantee that a
-recreated Nova server will preserve its physical ID or its IP address. The
-temporary status of the original server will be lost for sure.
-
-
-Profile-specific Recovery Actions
----------------------------------
-
-Each profile type supports a unique set of operations, some of which are
-relevant to failure recovery. For example, a Nova server may support many
-operations that can be used for failure recovery, while a Heat stack may
-support only the ``STACK_UPDATE`` operation for recovery. The set of actions
-that can be specified for recovery is profile specific, and thus an important
-part for the policy to check and validate.
-
-
-External Recovery Actions
--------------------------
-
-In real-life deployments, there are use cases where a simple recovery of a
-node itself is not sufficient to bring back the business services or
-applications that were running on those nodes. There are other use cases
-where appropriate actions must be taken on the storage and/or network used
-for a full failure recovery. These are the triggers for the Senlin team to
-bring in support for Mistral workflows as special actions.
-
-The current design is to allow for a mixture of built-in recovery actions and
-user-provided workflows. In the foreseeable future, Senlin does not manage
-the workflows to be executed and the team has no plan to support the
-debugging of workflow executions. Users have to make sure their workflows are
-doing the things they want.
-
-
-Fencing Support
-~~~~~~~~~~~~~~~
-
-The term "fencing" is used to describe the operations that make sure a
-seemingly failed resource is dead for sure. This is a very important aspect
-in all high-availability solutions. Take a Nova server failure as an example:
-there are many causes which can lead the server into an inactive status. A
-physical host crash, a network connection breakage etc. can all result in an
-unreachable node. From the Nova controller's perspective, it may appear that
-the host has gone offline; however, what really happened could be just that
-the management network is experiencing some problems. The host is actually
-still there, and all the VM instances on it are still active, which means
-they are still processing requests and they are still using the IP addresses
-allocated to them by a DHCP server.
-
-There are many such cases where a seemingly inactive node is still working,
-and these nodes will bring the whole cluster into an unpredictable status if
-we only attempt a premature recovery action without considering the
-possibility that the nodes are still alive.
-
-Considering this, we are working on modeling and implementing support for
-fencing in the health policy.
diff --git a/doc/source/contributor/policies/load_balance_v1.rst b/doc/source/contributor/policies/load_balance_v1.rst
deleted file mode 100644
index ac9d13044..000000000
--- a/doc/source/contributor/policies/load_balance_v1.rst
+++ /dev/null
@@ -1,258 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-==========================
-Load Balancing Policy V1.1
-==========================
-
-This policy is designed to enable Senlin clusters to leverage the Neutron
-LBaaS V2 features so that workloads can be distributed across nodes in a
-reasonable manner.
-
-.. schemaspec::
-   :package: senlin.policies.lb_policy.LoadBalancingPolicy
-
-
-Actions Handled
-~~~~~~~~~~~~~~~
-
-The policy is capable of handling the following actions:
-
-- ``CLUSTER_ADD_NODES``: an action that carries a list of node IDs for the
-  nodes (servers) to be added into the cluster.
-
-- ``CLUSTER_DEL_NODES``: an action that carries a list of node IDs for the
-  nodes (servers) to be removed from the cluster.
-
-- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value
-  named ``count`` in its ``inputs``.
-
-- ``CLUSTER_SCALE_OUT``: an action that carries an optional integer value
-  named ``count`` in its ``inputs``.
-
-- ``CLUSTER_RESIZE``: an action that carries some additional parameters that
-  specify the details of the resize request, e.g. ``adjustment_type``,
-  ``number`` etc. in its ``inputs``.
-
-- ``NODE_CREATE``: an action originated directly from an RPC request; it has
-  a node associated with it.
-
-- ``NODE_DELETE``: an action originated directly from an RPC request; it has
-  a node associated with it.
-
-The policy will be checked **AFTER** any of the above mentioned actions that
-adds new member nodes to the cluster is executed. It is also checked
-**BEFORE** any of the above actions that removes existing members from the
-cluster is executed.
-
-
-Policy Properties
-~~~~~~~~~~~~~~~~~
-
-The load-balancing policy has its properties grouped into three categories:
-``pool``, ``vip`` and ``health_monitor``. The ``pool`` property accepts a map
-that contains a detailed specification for the load-balancing pool that
-contains the nodes as members, such as "``protocol``", "``protocol_port``",
-"``subnet``", "``lb_method``" etc. Most of the properties have a default
-value except for the "``subnet``", which always requires an input.
-
-The ``vip`` property also accepts a map that contains a detailed
-specification for the "virtual IP address" visible to the service users.
-These include, for example, the "``subnet``", "``address``", "``protocol``"
-and "``protocol_port``" values to be associated/assigned to the VIP.
-
-The ``health_monitor`` property accepts a map that specifies the details
-about the configuration of the "health monitor" provided by (embedded into)
-the load balancer. The map may contain values for keys like "``type``",
-"``delay``", "``max_retries``", "``http_method``" etc.
-
-For the detailed specification of the policy, you can use the
-:command:`openstack cluster policy type show senlin.policy.loadbalance-1.1`
-command.
-
-
-Load Balancer Management
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-When attaching a loadbalance policy to a cluster, the engine will always try
-to create a new load balancer and then add the existing nodes to it as
-members. If any member node cannot be added to the load balancer, the engine
-refuses to attach the policy to the cluster.
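That all-or-nothing attach flow can be sketched as follows. The ``lb_driver``
object and its methods are hypothetical stand-ins for the real LBaaS driver
calls, not Senlin's actual API::

  def attach(lb_driver, cluster, spec):
      """Create a load balancer, then add every existing node as a member."""
      lb_data = lb_driver.lb_create(spec["vip"], spec["pool"],
                                    spec.get("health_monitor"))
      for node in cluster.nodes:
          member_id = lb_driver.member_add(lb_data, node)
          if member_id is None:
              lb_driver.lb_delete(lb_data)  # roll back; refuse to attach
              raise RuntimeError("failed to add node %s" % node.id)
          node.data["lb_member"] = member_id
      return lb_data

The rollback on the first failed member addition is what makes the attach
operation refuse the policy rather than leave a half-populated pool behind.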
-
-After having successfully added a node to the load balancer, the engine saves
-a key-value pair "``lb_member: <member-id>``" into the ``data`` field of the
-node. After all existing nodes have been successfully added to the load
-balancer, the engine saves the load balancer information into the policy
-binding data. The information stored is something like the following example:
-
-::
-
-  {
-    "LoadBalancingPolicy": {
-      "version": 1.0,
-      "data": {
-        "loadbalancer": "bb73fa92-324d-47a6-b6ce-556eda651532",
-        "listener": "d5f621dd-5f93-4adf-9c76-51bc4ec9f313",
-        "pool": "0f58df07-77d6-4aa0-adb1-8ac6977e955f",
-        "healthmonitor": "83ebd781-1417-46ac-851b-afa92844252d"
-      }
-    }
-  }
-
-When detaching a loadbalance policy from a cluster, the engine first checks
-the information stored in the policy binding data, where it will find the IDs
-of the load balancer, the listener, the health monitor etc. It then proceeds
-to delete these resources by invoking the LBaaS APIs. If any of the resources
-cannot be deleted for some reason, the policy detach request will be
-rejected.
-
-After all load balancer resources are removed, the engine will iterate
-through all cluster nodes and delete the "``lb_member``" key-value pair
-stored there. When all nodes have been virtually detached from the load
-balancer, the detach operation returns with a success.
-
-
-Scenarios
-~~~~~~~~~
-
-S1: ``CLUSTER_SCALE_IN``
-------------------------
-
-When scaling in a cluster, there may or may not be a scaling policy attached
-to the cluster. The loadbalance policy has to cope with both cases. The
-loadbalance policy first attempts to get the number of nodes to remove, then
-it tries to get the candidate nodes for removal.
-
-It will first check if there is a "``deletion``" key in the action's ``data``
-field. If it finds one, it means other policies have already helped decide
-the number of nodes to remove, and possibly even the candidate nodes for
-removal. If the "``deletion``" key is not found, the policy has to figure out
-the deletion count itself. It first checks if the action has an input named
-"``count``". The ``count`` value will be used if found, or else it will
-assume the ``count`` to be 1.
-
-When the policy finds that the candidate nodes for removal have not yet been
-chosen, it will try a random selection from all cluster nodes.
-
-After the policy has figured out the candidate nodes for removal, it invokes
-the LBaaS API to remove the candidates from the load balancer. If any of the
-removal operations fails, the scale-in operation fails before node removal
-actually happens.
-
-When all candidates have been removed from the load balancer, the scale-in
-operation continues to delete the candidate nodes.
-
-S2: ``CLUSTER_DEL_NODES``
--------------------------
-
-When deleting specified nodes from a cluster, the candidate nodes are already
-provided in the action's ``inputs`` property, so the loadbalance policy just
-iterates over the list of candidate nodes to update the load balancer. The
-load balancer side operation is identical to that outlined in scenario *S1*.
-
-S3: ``CLUSTER_RESIZE`` that Shrinks a Cluster
----------------------------------------------
-
-For a cluster resize operation, the loadbalance policy is invoked **BEFORE**
-the operation is attempting to remove any nodes from the cluster. If there
-are other policies (such as a scaling policy or a deletion policy) attached
-to the cluster, the number of nodes along with the candidate nodes might have
-already been decided.
-
-The policy first checks the "``deletion``" key in the action's ``data``
-field. If it finds one, it means other policies have already helped decide
-the number of nodes to remove, and possibly even the candidate nodes for
-removal. If the "``deletion``" key is not found, the policy has to figure out
-the deletion count itself. In the latter case, the policy will try to parse
-the ``inputs`` property of the action and see if it is about to delete nodes
-from the cluster. If the action is indeed about removing nodes, then the
-policy gets what it wants, i.e. the ``count`` value. If the action is not
-about deleting nodes, then the action passes the policy check directly.
-
-After having figured out the number of nodes to delete, the policy may still
-need to decide which nodes to remove, i.e. the candidates. When no other
-policy has made a decision, the loadbalance policy randomly chooses the
-specified number of nodes as candidates.
-
-After the candidates are eventually selected, the policy proceeds to update
-the load balancer as outlined in scenario *S1*.
-
-S4: ``CLUSTER_SCALE_OUT``
--------------------------
-
-The policy may be checked **AFTER** a scale-out operation is performed on the
-cluster. After new nodes have been created into the cluster, the loadbalance
-policy needs to notify the load balancer about the new members added.
-When the loadbalance policy is checked, there may or may not be other
-policies attached to the cluster, so the policy needs to handle both cases.
-
-It first checks if there is a "``creation``" key in the action's ``data``
-field. If the "``creation``" key is not found, it means the operation has
-nothing to do with the loadbalance policy. For example, it could be a request
-to resize a cluster, but the result is the removal of existing nodes instead
-of the creation of new ones. In this case, the policy checking aborts
-immediately.
-
-When new nodes are created, the operation is expected to have filled the
-action's ``data`` field with data that looks like the following example:
-
-::
-
-  {
-    "creation": {
-      "count": 2,
-      "nodes": [
-        "4e54e810-6579-4436-a53e-11b18cb92e4c",
-        "e730b3d0-056a-4fa3-9b1c-b1e6e8f7d6eb"
-      ]
-    }
-  }
-
-The "``nodes``" field in the ``creation`` map always contains a list of node
-IDs for the nodes that have been created. After getting the node IDs, the
-policy proceeds to add these nodes to the load balancer (recorded in the
-policy binding data) by invoking the LBaaS API. If any update operation to
-the load balancer fails, the policy returns with an error message. If a node
-has been successfully added to the load balancer, the engine will record the
-load balancer IDs in the node's ``data`` field.
-
-S5: ``CLUSTER_ADD_NODES``
--------------------------
-
-When a ``CLUSTER_ADD_NODES`` operation is completed, it will record the IDs
-of the nodes into the ``creation`` property of the action's ``data`` field.
-The logic to update the load balancer and the logic to update the ``data``
-field of individual nodes are identical to that described in scenario *S4*.
-
-S6: ``CLUSTER_RESIZE`` that Expands a Cluster
----------------------------------------------
-
-When a ``CLUSTER_RESIZE`` operation is completed and the operation results in
-some new nodes created and added to the cluster, it will record the IDs of
-the nodes into the ``creation`` property of the action's ``data`` field.
-The logic to update the load balancer and the logic to update the ``data``
-field of individual nodes are identical to that described in scenario *S4*.
-
-S7: Handling ``NODE_CREATE`` and ``NODE_DELETE`` Actions
---------------------------------------------------------
-
-When the action to be processed is a ``NODE_CREATE`` action, the new node has
-been created and is yet to be attached to the load balancer. The logic to
-update the load balancer and the ``data`` field of the node in question is
-identical to that described in scenario *S4*.
-
-When the action to be processed is a ``NODE_DELETE`` action, the node is
-about to be removed from the cluster. Before that, the policy is responsible
-for detaching it from the load balancer. The logic to update the load
-balancer and the ``data`` field of the node in question is identical to that
-described in scenario *S1*.
diff --git a/doc/source/contributor/policies/region_v1.rst b/doc/source/contributor/policies/region_v1.rst
deleted file mode 100644
index e6f42c171..000000000
--- a/doc/source/contributor/policies/region_v1.rst
+++ /dev/null
@@ -1,232 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-============================
-Region Placement Policy V1.0
-============================
-
-This policy is designed to make sure the nodes in a cluster are distributed
-across multiple regions according to a specified scheme.
-
-.. schemaspec::
-   :package: senlin.policies.region_placement.RegionPlacementPolicy
-
-
-Actions Handled
-~~~~~~~~~~~~~~~
-
-The policy is capable of handling the following actions:
-
-- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value
-  named ``count`` in its ``inputs``.
-
-- ``CLUSTER_SCALE_OUT``: an action that carries an optional integer value
-  named ``count`` in its ``inputs``.
-
-- ``CLUSTER_RESIZE``: an action that accepts a map as its input parameters in
-  its ``inputs`` property, such as "``adjustment_type``", "``number``" etc.
-
-- ``NODE_CREATE``: an action originated directly from an RPC request. This
-  action has an associated node object that will be created.
-
-The policy will be checked **BEFORE** any of the above mentioned actions is
-executed. Because the same policy implementation is used for covering both
-the cases of scaling out a cluster and the cases of scaling in, the region
-placement policy needs to parse the inputs in different scenarios.
-
-The placement policy can be used independently, with or without other
-policies attached to the same cluster. So the policy needs to understand
-whether there are policy decisions from other policies (such as a
-:doc:`scaling policy <scaling_v1>`).
-
-When the policy is checked, it will first attempt to get the proper ``count``
-input value, which may be an outcome from other policies or the inputs for
-the action. For more details, check the scenarios described in the following
-sections.
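The distribution calculations referred to in the scenarios below can be
approximated by a greedy scheme like the following sketch. It is illustrative
only, not the engine's actual algorithm (which, for example, also honors
capacity limits); it assumes each region has a relative ``weight``::

  def plan_creation(current, weights, count):
      """Spread ``count`` new nodes so totals track the weight ratios.

      ``current`` maps region name to existing node count; ``weights``
      maps region name to its relative weight.
      """
      total = sum(current.values()) + count
      wsum = float(sum(weights.values()))
      desired = {r: total * w / wsum for r, w in weights.items()}
      placed = dict(current)
      plan = {r: 0 for r in weights}
      for _ in range(count):
          # hand the next node to the region lagging most behind its share
          target = max(weights, key=lambda r: desired[r] - placed.get(r, 0))
          plan[target] += 1
          placed[target] = placed.get(target, 0) + 1
      return plan

With ``current = {"RegionOne": 0, "RegionTwo": 0}``, ``weights =
{"RegionOne": 1, "RegionTwo": 2}`` and ``count = 3``, this sketch yields one
node for ``RegionOne`` and two for ``RegionTwo``, matching the shape of the
scale-out example in scenario *S2* below.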
-
-
-Scenarios
-~~~~~~~~~
-
-S1: ``CLUSTER_SCALE_IN``
-------------------------
-
-The placement policy first checks if there are policy decisions from other
-policies by looking into the ``deletion`` field of the action's ``data``
-property. If there is such a field, the policy attempts to extract the
-``count`` value from the ``deletion`` field. If the ``count`` value is not
-found, 1 is assumed to be the default.
-
-If, however, the policy fails to find the ``deletion`` field, it tries to
-find if there is a ``count`` field in the action's ``inputs`` property. If
-the answer is true, the policy will use it, or else it will fall back to
-assume 1 as the default count.
-
-After the policy has found out the ``count`` value (i.e. the number of nodes
-to be deleted), it validates the list of region names provided to the policy.
-If, for some reason, none of the provided names passes the validation, the
-policy check fails with the following data recorded in the action's ``data``
-property:
-
-::
-
-  {
-    "status": "ERROR",
-    "reason": "No region is found usable."
-  }
-
-With the list of regions known to be good and the map of node distribution
-specified in the policy spec, the Senlin engine continues to calculate a
-placement plan that best matches the desired distribution.
-
-If there are nodes that cannot be fit into the distribution plan, the policy
-check fails with an error recorded in the action's ``data``, as shown below:
-
-::
-
-  {
-    "status": "ERROR",
-    "reason": "There is no feasible plan to handle all nodes."
-  }
-
-If there is a feasible plan to remove nodes from each region, the policy
-saves the plan into the ``data`` property of the action as exemplified below:
-
-::
-
-  {
-    "status": "OK",
-    "deletion": {
-      "count": 3,
-      "regions": {
-        "RegionOne": 2,
-        "RegionTwo": 1
      }
-    }
-  }
-
-This means that, in total, 3 nodes should be removed from the cluster. Among
-them, 2 nodes should be selected from region "``RegionOne``" and the
-remaining one should be selected from region "``RegionTwo``".
-
-**NOTE**: When there is a :doc:`deletion policy <deletion_v1>` attached to
-the same cluster, that deletion policy will be evaluated after the region
-placement policy, and it is expected to rebase its candidate selection on the
-region distribution enforced here. For example, if the deletion policy is
-tasked to select the oldest nodes for deletion, it will adapt its behavior to
-select the oldest nodes from each region. The number of nodes to be chosen
-from each region would be based on the output from this placement policy.
-
-
-S2: ``CLUSTER_SCALE_OUT``
--------------------------
-
-The placement policy first checks if there are policy decisions from other
-policies by looking into the ``creation`` field of the action's ``data``
-property. If there is such a field, the policy attempts to extract the
-``count`` value from the ``creation`` field. If the ``count`` value is not
-found, 1 is assumed to be the default.
-
-If, however, the policy fails to find the ``creation`` field, it tries to
-find if there is a ``count`` field in the action's ``inputs`` property. If
-the answer is true, the policy will use it, or else it will fall back to
-assume 1 as the default node count.
-
-After the policy has found out the ``count`` value (i.e. the number of nodes
-to be created), it validates the list of region names provided to the policy
-and extracts the current distribution of nodes among those regions.
-
-If, for some reason, none of the provided names passes the validation, the
-policy check fails with the following data recorded in the action's ``data``
-property:
-
-::
-
-  {
-    "status": "ERROR",
-    "reason": "No region is found usable."
-  }
-
-The logic of generating a distribution plan is almost identical to what has
-been described in scenario *S1*, except for the output format. When there is
-a feasible plan to accommodate all nodes, the plan is saved into the ``data``
-property of the action as shown in the following example:
-
-::
-
-  {
-    "status": "OK",
-    "creation": {
-      "count": 3,
-      "regions": {
-        "RegionOne": 1,
-        "RegionTwo": 2
      }
-    }
-  }
-
-This means that, in total, 3 nodes should be created into the cluster. Among
-them, 2 nodes should be created in region "``RegionTwo``" and the remaining
-one should be created in region "``RegionOne``".
-
-S3: ``CLUSTER_RESIZE``
-----------------------
-
-The placement policy first checks if there are policy decisions from other
-policies by looking into the ``creation`` field of the action's ``data``
-property. If there is such a field, the policy extracts the ``count`` value
-from the ``creation`` field. If the ``creation`` field is not found, the
-policy tries to find if there is a ``deletion`` field in the action's
-``data`` property. If there is such a field, the policy extracts the
-``count`` value from the ``deletion`` field. If neither ``creation`` nor
-``deletion`` is found in the action's ``data`` property, the policy proceeds
-to parse the raw inputs of the action.
-
-The output from the parser may indicate an invalid combination of input
-values. If that is the case, the policy check fails with the action's
-``data`` set to something like the following example:
-
-::
-
-  {
-    "status": "ERROR",
-    "reason": "<error message from the input parser>"
-  }
-
-If the parser successfully parsed the action's raw inputs, the policy tries
-again to find if there is either a ``creation`` or a ``deletion`` field in
-the action's ``data`` property. It will use the ``count`` value from the
-field found as the number of nodes to be handled.
-
-When the placement policy finds out the number of nodes to create (or
-delete), it proceeds to calculate a distribution plan. If the action is about
-growing the size of the cluster, the logic and the output format are the same
-as those outlined in scenario *S2*. Otherwise, the logic and the output
-format are identical to those described in scenario *S1*.
-
-
-S4: ``NODE_CREATE``
--------------------
-
-When handling a ``NODE_CREATE`` action, the region placement policy only
-needs to deal with the node associated with the action. If, however, the node
-is referencing a profile which has a ``region_name`` specified in its spec,
-this policy will avoid choosing a deployment region for the node. In other
-words, the ``region_name`` specified in the profile spec used takes
-precedence.
-
-If the profile spec doesn't specify a region name, this placement policy will
-proceed to do an evaluation of the current region distribution followed by a
-calculation of a distribution plan. The logic and the output format are the
-same as those in scenario *S2*, although the number of nodes to handle is one
-in this case.
diff --git a/doc/source/contributor/policies/scaling_v1.rst b/doc/source/contributor/policies/scaling_v1.rst
deleted file mode 100644
index 1ceae9050..000000000
--- a/doc/source/contributor/policies/scaling_v1.rst
+++ /dev/null
@@ -1,149 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-===================
-Scaling Policy V1.0
-===================
-
-This policy is designed to help decide the detailed, quantitative parameters
-used for scaling in/out a cluster. Senlin does provide a more complicated API
-for resizing a cluster (i.e. ``cluster_resize``); however, in some use cases,
-we cannot assume the requesters have all the factors needed to determine each
-and every detailed parameter for resizing a cluster. There are cases where
-the only thing a requester knows for sure is that a cluster should be scaled
-out, or be scaled in. A scaling policy helps derive appropriate, quantitative
-parameters for such a request.
-
-Note that when calculating the target capacity of the cluster, Senlin only
-considers the **ACTIVE** nodes.
-
-.. schemaspec::
-   :package: senlin.policies.scaling_policy.ScalingPolicy
-
-
-Actions Handled
-~~~~~~~~~~~~~~~
-
-The policy is capable of handling the following actions:
-
-- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value
-  named ``count`` in its ``inputs``.
-
-- ``CLUSTER_SCALE_OUT``: an action that carries an optional integer value
-  named ``count`` in its ``inputs``.
-
-The policy will be checked **BEFORE** any of the above mentioned actions is
-executed. Because the same policy implementation is used for covering both
-the cases of scaling out a cluster and the cases of scaling in, the scaling
-policy exposes an "``event``" property to differentiate a policy instance.
-This is purely an implementation convenience.
-
-The Senlin engine respects the user-provided "``count``" input parameter if
-it is specified. Otherwise, the policy computes a ``count`` value based on
-the policy's ``adjustment`` property. In both cases, the policy will validate
-the targeted capacity against the cluster's size constraints.
-
-After validating the ``count`` value, the scaling policy proceeds to update
-the ``data`` property of the action based on the validation result. If the
-validation fails, the ``data`` property of the action will be updated to
-something similar to the following example:
-
-::
-
-  {
-    "status": "ERROR",
-    "reason": "The target capacity (3) is less than cluster's min_size (2)."
-  }
-
-If the validation succeeds, the ``data`` property of the action is updated
-accordingly (see Scenarios below).
-
-
-Scenarios
-~~~~~~~~~
-
-S1: ``CLUSTER_SCALE_IN``
-------------------------
-
-The request may carry a "``count``" parameter in the action's ``inputs``
-field. The scaling policy respects the user input if provided, or else it
-will calculate the number of nodes to be removed based on other properties of
-the policy. In either case, the policy will check if the ``count`` value is a
-positive integer (or can be converted to one).
-
-In the next step, the policy checks if the "``best_effort``" property has
-been set to ``True`` (the default is ``False``).
-When the value is ``True``, the policy will attempt to use the actual
-difference between the cluster's minimum size and its current capacity,
-rather than the ``count`` value, if the latter is greater than the former.
-
-When the proper ``count`` value is generated and passes validation, the
-policy updates the ``data`` property of the action into something like the
-following example:
-
-::
-
-  {
-    "status": "OK",
-    "reason": "Scaling request validated.",
-    "deletion": {
-      "count": 2
    }
-  }
-
-
-S2: ``CLUSTER_SCALE_OUT``
--------------------------
-
-The request may carry a "``count``" parameter in the action's ``inputs``
-field. The scaling policy respects the user input if provided, or else it
-will calculate the number of nodes to be added based on other properties of
-the policy. In either case, the policy will check if the ``count`` value is a
-positive integer (or can be converted to one).
-
-In the next step, the policy checks if the "``best_effort``" property has
-been set to ``True`` (the default is ``False``). When the value is ``True``,
-the policy will attempt to use the actual difference between the cluster's
-maximum size and its current capacity, rather than the ``count`` value, if
-the latter is greater than the former.
-
-When the proper ``count`` value is generated and passes validation, the
-policy updates the ``data`` property of the action into something like the
-following example:
-
-::
-
-  {
-    "status": "OK",
-    "reason": "Scaling request validated.",
-    "creation": {
-      "count": 2
    }
-  }
-
-
-S3: Cross-region or Cross-AZ Scaling
-------------------------------------
-
-When scaling a cluster across multiple regions or multiple availability
-zones, the scaling policy will be evaluated before the
-:doc:`region placement policy <region_v1>` or the
-:doc:`zone placement policy <zone_v1>` respectively. Based on builtin
-priority settings, checking of this scaling policy always happens before the
-region placement policy or the zone placement policy.
-
-The ``creation.count`` or ``deletion.count`` field is expected to be
-respected by the region placement or zone placement policy. In other words,
-those placement policies will base their calculation of node distribution on
-the ``creation.count`` or ``deletion.count`` value respectively.
diff --git a/doc/source/contributor/policies/zone_v1.rst b/doc/source/contributor/policies/zone_v1.rst
deleted file mode 100644
index 6df931f95..000000000
--- a/doc/source/contributor/policies/zone_v1.rst
+++ /dev/null
@@ -1,235 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-==========================
-Zone Placement Policy V1.0
-==========================
-
-This policy is designed to make sure the nodes in a cluster are distributed
-across multiple availability zones according to a specified scheme.
-
-.. schemaspec::
schemaspec:: - :package: senlin.policies.zone_placement.ZonePlacementPolicy - - -Actions Handled -~~~~~~~~~~~~~~~ - -The policy is capable of handling the following actions: - -- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value named - ``count`` in its ``inputs``. - -- ``CLUSTER_SCALE_OUT``: an action that carries an optional integer value - named ``count`` in its ``inputs``. - -- ``CLUSTER_RESIZE``: an action that accepts a map as its input parameters in - its ``inputs`` property, such as "``adjustment_type``", "``number``" etc. - -- ``NODE_CREATE``: an action originated directly from a RPC request. Such an - action will have a node object associated with it, which becomes the one to - be handled by this policy. - -The policy will be checked **BEFORE** any of the above mentioned actions is -executed. Because the same policy implementation is used for covering both the -cases of scaling out a cluster and the cases of scaling in, the zone placement -policy need to parse the inputs in different scenarios. - -The placement policy can be used independently, with and without other polices -attached to the same cluster. So the policy needs to understand whether there -are policy decisions from other policies (such as a -:doc:`scaling policy `). - -When the policy is checked, it will first attempt to get the proper ``count`` -input value, which may be an outcome from other policies or the inputs for -the action. For more details, check the scenarios described in following -sections. - - -Scenarios -~~~~~~~~~ - -S1: ``CLUSTER_SCALE_IN`` ------------------------- - -The placement policy first checks if there are policy decisions from other -policies by looking into the ``deletion`` field of the action's ``data`` -property. If there is such a field, the policy attempts to extract the -``count`` value from the ``deletion`` field. If the ``count`` value is not -found, 1 is assumed to be the default. - -If, however, the policy fails to find the ``deletion`` field, it tries to find -if there is a ``count`` field in the action's ``inputs`` property. If the -answer is true, the policy will use it, or else it will fall back to assume 1 -as the default count. - -After the policy has find out the ``count`` value (i.e. number of nodes to be -deleted), it validates the list of availability zone names provided to the -policy. If for some reason, none of the provided names passed the validation, -the policy check fails with the following data recorded in the action's -``data`` property: - -:: - - { - "status": "ERROR", - "reason": "No availability zone found available.", - } - -With the list of availability zones known to be good and the map of node -distribution specified in the policy spec, senlin engine continues to -calculate a distribution plan that best matches the desired distribution. -If there are nodes that cannot be fit into the distribution plan, the policy -check fails with an error recorded in the action's ``data``, as shown below: - -:: - - { - "status": "ERROR", - "reason": "There is no feasible plan to handle all nodes." - } - -If there is a feasible plan to remove nodes from each availability zone, the -policy saves the plan into the ``data`` property of the action as exemplified -below: - -:: - - { - "status": "OK", - "deletion": { - "count": 3, - "zones": { - "nova-1": 2, - "nova-2": 1 - } - } - } - -This means in total, 3 nodes should be removed from the cluster. 
Among them, 2 nodes should be selected from availability zone ``nova-1`` and
the remaining one should be selected from availability zone ``nova-2``.

**NOTE**: When there is a :doc:`deletion policy <deletion_v1>` attached to
the same cluster, that deletion policy will be evaluated after the zone
placement policy, and it is expected to rebase its candidate selection on the
zone distribution enforced here. For example, if the deletion policy is
tasked to select the oldest nodes for deletion, it will adapt its behavior to
select the oldest nodes from each availability zone. The number of nodes to
be chosen from each availability zone is based on the output from this
placement policy.


S2: ``CLUSTER_SCALE_OUT``
-------------------------

The placement policy first checks if there are policy decisions from other
policies by looking into the ``creation`` field of the action's ``data``
property. If there is such a field, the policy attempts to extract the
``count`` value from the ``creation`` field. If the ``count`` value is not
found, 1 is assumed to be the default.

If, however, the policy fails to find the ``creation`` field, it checks
whether there is a ``count`` field in the action's ``inputs`` property. If
there is, the policy will use it; otherwise it falls back to 1 as the default
node count.

After the policy has found the ``count`` value (i.e. the number of nodes to
be created), it validates the list of availability zone names provided to the
policy and extracts the current distribution of nodes among those
availability zones.

If for some reason none of the provided names passes the validation, the
policy check fails with the following data recorded in the action's ``data``
property:

::

  {
    "status": "ERROR",
    "reason": "No availability zone found available."
  }

The logic of generating a distribution plan is almost identical to what has
been described in scenario *S1*, except for the output format. When there is
a feasible plan to accommodate all nodes, the plan is saved into the ``data``
property of the action as shown in the following example:

::

  {
    "status": "OK",
    "creation": {
      "count": 3,
      "zones": {
        "nova-1": 1,
        "nova-2": 2
      }
    }
  }

This means in total, 3 nodes should be created into the cluster. Among them,
1 node should be created in availability zone ``nova-1`` and the remaining 2
nodes should be created in availability zone ``nova-2``.

S3: ``CLUSTER_RESIZE``
----------------------

The placement policy first checks if there are policy decisions from other
policies by looking into the ``creation`` field of the action's ``data``
property. If there is such a field, the policy extracts the ``count`` value
from the ``creation`` field. If the ``creation`` field is not found, the
policy checks whether there is a ``deletion`` field in the action's ``data``
property. If there is such a field, the policy extracts the ``count`` value
from the ``deletion`` field. If neither ``creation`` nor ``deletion`` is
found in the action's ``data`` property, the policy proceeds to parse the raw
inputs of the action.

The output from the parser may indicate an invalid combination of input
values. If that is the case, the policy check fails with the action's
``data`` set to something like the following example:

::

  {
    "status": "ERROR",
    "reason": "<error message from the parser>"
  }

If the parser successfully parses the action's raw inputs, the policy tries
again to find either a ``creation`` or a ``deletion`` field in the action's
``data`` property. It will use the ``count`` value from the field found as
the number of nodes to be handled.

When the placement policy has found the number of nodes to create (or
delete), it proceeds to calculate a distribution plan. If the action is about
growing the size of the cluster, the logic and the output format are the same
as those outlined in scenario *S2*. Otherwise, the logic and the output
format are identical to those described in scenario *S1*.

S4: ``NODE_CREATE``
-------------------

When handling a ``NODE_CREATE`` action, the zone placement policy needs to
process the single node associated with the action, i.e. the node to be
created. If the node references a profile whose spec contains an
``availability_zone`` property, it means the requesting user has a preferred
availability zone for the new node. In this case, the placement policy
returns directly without choosing an availability zone for the node.

If the profile spec doesn't have ``availability_zone`` specified, the
placement policy proceeds to evaluate the current zone distribution and then
calculate a distribution plan so that the new node will be deployed into a
proper availability zone. The logic and the output format are identical to
those in scenario *S2*.

diff --git a/doc/source/contributor/policy.rst b/doc/source/contributor/policy.rst
deleted file mode 100644
index 7385525cd..000000000
--- a/doc/source/contributor/policy.rst
+++ /dev/null
@@ -1,146 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


========
Policies
========

A policy is a wrapper around a collection of rules that will be checked or
enforced when Senlin performs some operations on the objects it manages. The
design goals of policy support in Senlin are flexibility and customizability.
We strive to make the policies flexible so that we can accommodate diverse
types of policies for various usage scenarios. We also want to make policy
type development an easier task for developers to introduce new policies
and/or customize existing ones for their needs.
- - -Policy Properties -~~~~~~~~~~~~~~~~~ - -A policy object has the following properties: - -- ``id``: a string containing the globally unique ID for the object; -- ``name``: a string containing the name of the policy object; -- ``type``: a string containing the name of the policy type; -- ``spec``: a map containing the validated specification for the object; -- ``created_at``: timestamp of the object creation; -- ``updated_at``: timestamp of last update to the object; -- ``data``: a map containing some private data for the policy object; - -Creating a Policy -~~~~~~~~~~~~~~~~~ - -When the Senlin API receives a request to create a policy object, it first -checks if the JSON body contains a map named ``policy`` that has the ``name`` -and ``spec`` keys and values associated with them. If any of these keys are -missing, the request will be treated as an invalid one and rejected. - -After the preliminary request validation done at the Senlin API layer, Senlin -engine will further check whether the specified policy type does exist and -whether the specified ``spec`` can pass the validation logic in the policy -type implementation. If this phase of validation is successful, a policy -object will be created and saved into the database, then a map containing the -details of the object will be returned to the requester. If any of these -validations fail, an error message will be returned to the requester instead. - - -Listing Policies -~~~~~~~~~~~~~~~~ - -Policy objects can be listed using the Senlin API. When querying the policy -objects, a user can specify the following query parameters, individually or -combined: - -- ``filters``: a map containing key-value pairs that will be used for matching - policy records. Records that fail to match this criteria will be filtered - out. The following strings are valid keys: - - * ``name``: name of policies to list, can be a string or a list of strings; - * ``type``: type name of policies, can be a string or a list of strings; - -- ``limit``: a number that restricts the maximum number of records to be - returned from the query. It is useful for displaying the records in pages - where the page size can be specified as the limit. -- ``marker``: A string that represents the last seen UUID of policies in - previous queries. This query will only return results appearing after the - specified UUID. This is useful for displaying records in pages. -- ``sort``: A string to enforce sorting of the results. It can accept a list of - known property names as sorting keys separated by commas. For each sorting - key, you can append either ``:asc`` or ``:desc`` as its sorting order. By - default, ``:asc`` is assumed to be the sorting direction. -- ``global_project``: A boolean indicating whether policy listing should be - done in a tenant-safe way. When this value is specified as False (the - default), only policies from the current project that match the other - criteria will be returned. When this value is specified as True, policies - that matching all other criteria would be returned, no matter in which - project a policy was created. Only a user with admin privilege is permitted - to do a global listing. - - -The Senlin API performs some basic checks on the data type and values of the -provided parameters and then passes the request to Senlin engine. When there -are policy objects matching the query criteria, a list of policy objects is -returned to the requester. If there is no matching record, the result will be -an empty list. 
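As an illustration of how these query parameters combine, the request line
below lists at most 10 policies in the current project, sorted by name in
ascending order and then by creation time in descending order. The
``/v1/policies`` path and the exact parameter encoding are assumptions for
illustration; the parameter names themselves are the ones documented above.

::

  GET /v1/policies?limit=10&sort=name:asc,created_at:desc&global_project=False
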
Getting a Policy
~~~~~~~~~~~~~~~~

A user can provide the UUID, the name or the short ID of a policy object to
the Senlin API ``policy_show`` to retrieve the details about a policy.

If a policy object matching the criteria is found, Senlin API returns the
object details in a map; if more than one object is found, Senlin API returns
an error message telling users that there are multiple choices; if no object
is found matching the criteria, a different error message will be returned to
the requester.


Updating a Policy
~~~~~~~~~~~~~~~~~

After a policy is created, a user can send requests to the Senlin API to
change some of its properties. To avoid potential state conflicts inside the
Senlin engine, we currently don't allow changes to the ``spec`` property of a
policy. However, changing the ``name`` property is permitted.

When validating the requester-provided parameters, Senlin API will check if
the values are of valid data types and whether the values fall in allowed
ranges. After this validation, the request is forwarded to the Senlin engine
for processing.

The Senlin engine will try to find the policy using the specified policy
identity as the UUID, the name or a short ID of the policy object. When no
matching object is found or more than one object is found, an error message
is returned to the user. Otherwise, the engine updates the object property
and returns the object details in a map.


Deleting a Policy
~~~~~~~~~~~~~~~~~

A user can specify the UUID, the name or the short ID of a policy object when
sending a ``policy_delete`` request to the Senlin API.

The Senlin engine will try to find the matching policy object using the
specified identity as the UUID, the name or a short ID of the policy object.
When no matching object is found or more than one object is found, an error
message is returned. Otherwise, the API returns a 204 status to the requester
indicating that the deletion was successful.

To prevent the deletion of policies that are still in use by any clusters,
the Senlin engine will check whether any bindings exist between the specified
policy and a cluster. An error message will be returned to the requester if
such a binding is found.

diff --git a/doc/source/contributor/policy_type.rst b/doc/source/contributor/policy_type.rst
deleted file mode 100644
index 1aad11115..000000000
--- a/doc/source/contributor/policy_type.rst
+++ /dev/null
@@ -1,293 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


============
Policy Types
============

A :doc:`policy <policy>` is a set of rules that are checked and enforced. The
checking can be done before or after an action's execution, or both. Policies
are of different policy types, each of which is designed to make sure that a
cluster's behavior follows certain patterns or complies with certain
restrictions.

When released, Senlin comes with some built-in policy types to meet the
requirements found in some typical use cases.
However, the distributors or the -users can always augment their collection of policy types by implementing -their own ones. - -Policy type implementations are managed as Senlin plugins. The plan is to have -Senlin engine support dynamical loading of plugins from user specified modules -and classes. Currently, this can be achieved by adding new ``senlin.policies`` -entries in the ``entry_points`` section in the ``setup.cfg`` file, followed by -a reinstall of the Senlin service, i.e. ``sudo pip install`` command. - - -The Base Class ``Policy`` -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The base class ``Policy`` provides some common logics regarding the following -operations: - -- The initialization of the ``spec_data`` property, based on the - ``spec_schema`` definition and the ``spec`` input. -- The serialization and deserialization of a policy object into/from database. -- The serialization and deserialization of a policy object into/from a dict. -- The default validation operation for the ``spec_data`` property. -- Default implementations for the following methods which are to be overridden - by a policy type implementation: - - * ``attach(cluster_id, action)``: a method that will be invoked when a policy - object of this type is attached to a cluster. - * ``detach(cluster_id, action)``: a method that will be invoked when a policy - object of this type is detached from a cluster. - * ``pre_op(cluster_id, action)``: a method that will be invoked before an - action is executed; - * ``post_op(cluster_id, action)``: a method that will be invoked after an - action is executed. - - -The ``VERSIONS`` Property -------------------------- - -Each policy type class has a ``VERSIONS`` class property that documents the -changes to the policy type. This information is returned when users request -to list all policy types supported. - -The ``VERSIONS`` property is a dict with version numbers as keys. For each -specific version, the value is list of support status changes made to the -policy type. Each change record contains a ``status`` key whose value is one -of ``EXPERIMENTAL``, ``SUPPORTED``, ``DEPRECATED`` or ``UNSUPPORTED``, and a -``since`` key whose value is of format ``yyyy.mm`` where ``yyyy`` and ``mm`` -are the year and month of the release that bears the change to the support -status. For example, the following record indicates that the specific policy -type was introduced in April, 2016 (i.e. version 1.0 release of Senlin) as -an experimental feature; later, in October, 2016 (i.e. version 2.0 release of -Senlin) it has graduated into a mature feature supported by the developer -team. - -.. code:: python - - VERSIONS = { - '1.0': [ - { - "status": "EXPERIMENTAL", - "since": "2016.04" - }, - { - "status": "SUPPORTED", - "since": "2016.10" - } - ] - } - - -Providing New Policy Types -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Adding new policy type implementations is an easy task with only a few steps -to follow. - - -Develop A New Policy Type -------------------------- - -The first step for adding a new policy type is to create a new file containing -a subclass of ``Policy``. Then you will define the spec schema for the new -policy type in a Python dictionary named ``spec_schema``. - - -Defining Spec Schema --------------------- - -Each key in this dictionary represents a property name; the value of it is an -object of one of the schema types listed below: - -- ``String``: A string property. -- ``Boolean``: A boolean property. -- ``Integer``: An integer property. -- ``List``: A property containing a list of values. 
-- ``Map``: A property containing a map of key-value pairs. - -For example: - -.. code:: python - - spec_schema = { - 'destroy_after_delete': schema.Boolean( - 'Boolean indicating whether object will be destroyed after deletion.', - default=True, - ), - ... - } - - -If a property value will be a list, you can further define the type of items -the list can accept. For example: - -.. code:: python - - spec_schema = { - 'criteria': schema.List( - 'Criteria for object selection that will be evaluated in order.', - schema=schema.String('Name of a criterion'), - ), - ... - } - -If a property value will be a map of key-value pairs, you can define the -schema of the map, which is another Python dictionary containing definitions -of properties. For example: - -.. code:: python - - spec_schema = { - 'strategy': schema.Map( - 'Strategy for dealing with servers with different states.', - schema={ - 'inactive': 'boot', - 'deleted': 'create', - 'suspended': 'resume', - }, - ), - ... - } - -When creating a schema type object, you can specify the following keyword -arguments to gain a better control of the property: - -- ``default``: a default value of the expected data type; -- ``required``: a boolean value indicating whether a missing of the property - is acceptable when validating the policy spec; -- ``constraints``: a list of ``Constraint`` objects each of which defines a - constraint to be checked. Senlin currently only support ``AllowedValues`` - constraint. - - -Applicable Profile Types ------------------------- - -Not all policy types can be used on all profile types. For example, a policy -about load-balancing is only meaningful for objects that can handle workloads, -or more specifically, objects that expose service access point on an IP port. - -You can define what are the profile types your new policy type can handle by -specifying the ``PROFILE_TYPE`` property of your policy type class. The value -of ``PROFILE_TYPE`` is a list of profile type names. If a policy type is -designed to handle all profile types, you can specify a single entry ``ANY`` -as the value. See :doc:`profile types ` for profile type related -operations. - - -Policy Targets --------------- - -A policy type is usually defined to handle certain operations. The rules -embedded in the implementation may need to be checked before the execution of -an :doc:`action ` or they may need to be enforced after the execution -of the action. When an action is about to be executed or an action has -finished execution, the Senlin engine will check if any policy objects -attached to a cluster is interested in the action. If the answer is yes, the -engine will invoke the ``pre_op`` function or the ``post_op`` function -respectively, thus giving the policy object a chance to adjust the action's -behavior. - -You can define a ``TARGET`` property for the policy type implementation to -indicate the actions your policy type want to subscribe to. The ``TARGET`` -property is a list of tuple (``WHEN``, ``ACTION``). For example, the following -property definition indicates that the policy type is interested in the action -``CLUSTER_SCALE_IN`` and ``CLUSTER_DEL_NODES``. The policy type wants itself -be consulted *before* these actions are performed. - -.. code:: python - - class MyPolicyType(Policy): - ... - TARGET = [ - (BEFORE, consts.CLUSTER_SCALE_IN), - (BEFORE, consts.CLUSTER_DEL_NODES), - ] - ... - -When the corresponding actions are about to be executed, the ``pre_op`` -function of this policy object will be invoked. 
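To make the checking workflow concrete, here is a minimal sketch of how the
hypothetical policy type above might implement ``pre_op`` to validate a
scale-in request. The way ``inputs`` and ``data`` are accessed, and the cap
being checked, are illustrative assumptions rather than requirements of the
base class.

.. code:: python

    class MyPolicyType(Policy):

        TARGET = [
            (BEFORE, consts.CLUSTER_SCALE_IN),
            (BEFORE, consts.CLUSTER_DEL_NODES),
        ]

        def pre_op(self, cluster_id, action):
            # Read the requested count from the action's inputs, falling
            # back to 1 when it is not provided (illustrative only).
            count = action.inputs.get('count', 1)

            if count > 10:  # a hypothetical per-step cap for this sketch
                action.data['status'] = 'ERROR'
                action.data['reason'] = 'Too many nodes removed in one step.'
            else:
                action.data['status'] = 'OK'
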
- - -Passing Data Between Policies ------------------------------ - -Each policy type may decide to send some data as additional inputs or -constraints for the action to consume. This is done by modifying the ``data`` -property of an ``Action`` object (see :doc:`action `). - -A policy type may want to check if there are other policy objects leaving some -policy decisions in the ``data`` property of an action object. - -Senlin allows for more than one policy to be attached to the same cluster. -Each policy, when enabled, is supposed to check a specific subset of cluster -actions. In other words, different policies may get checked before/after the -engine executes a specific cluster action. This design is effectively forming -a chain of policies for checking. The decisions (outcomes) from one policy -sometimes impact other policies that are checked later. - -To help other developers to understand how a specific policy type is designed -to work in concert with others, we require all policy type implementations -shipped with Senlin accompanied by a documentation about: - -* the ``action data`` items the policy type will consume, including how these - data will impact the policy decisions. -* the ``action.data`` items the policy type will produce, thus consumable by - any policies downstream. - -For built-in policy types, the protocol is documented below: - -.. toctree:: - :maxdepth: 1 - - policies/affinity_v1 - policies/deletion_v1 - policies/load_balance_v1 - policies/region_v1 - policies/scaling_v1 - policies/zone_v1 - - -Registering The New Policy Type -------------------------------- - -For Senlin service to be aware of and thus to make use of the new policy type -you have just developed, you will register it to the Senlin service. -Currently, this is done through a manual process shown below. In future, -Senlin will provide dynamical loading support to policy type plugins. - -To register a new plugin type, you will add a line to the ``setup.cfg`` file -that can be found at the root directory of Senlin code base. For example: - -:: - - [entry_points] - senlin.policies = - ScalingPolicy = senlin.policies.scaling_policy:ScalingPolicy - MyCoolPolicy = : - -Finally, save that file and do a reinstall of the Senlin service, followed -by a restart of the ``senlin-engine`` process. - -:: - - $ sudo pip install -e . - - -Now, when you do a :command:`openstack cluster policy type list`, you will see -your policy type listed along with other existing policy types. diff --git a/doc/source/contributor/profile.rst b/doc/source/contributor/profile.rst deleted file mode 100644 index 2719e98d8..000000000 --- a/doc/source/contributor/profile.rst +++ /dev/null @@ -1,149 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -======== -Profiles -======== - -A profile is an object instantiated from a "profile type" and it is used as -the specification for creating a physical object to be managed by Senlin. 
The "physical" adjective here is used to differentiate such an object from
its counterpart, the "logical" object, which is referred to as a node in
Senlin.

As the specification for physical object creation, a profile contains almost
every piece of information needed for the underlying driver to create an
object. After a physical object is created, its UUID will be assigned to the
``physical_id`` property of a node as reference. When a physical object is
deleted, the ``physical_id`` property will be set to ``None``.

Although not required, a profile may reference the node object's properties
when creating a physical object. For example, a profile may use the node's
``index`` property value to generate a name for the object; a profile may
customize an object's property based on the ``role`` property value of a
node. It is up to the profile type author and the specific use case how a
profile makes use of the properties of a node.


Profile Properties
~~~~~~~~~~~~~~~~~~

A profile object has the following properties:

- ``id``: a globally unique ID assigned to the object after creation;
- ``name``: a string representation of the profile name;
- ``type``: a string referencing the profile type used;
- ``context``: a map of key-value pairs that contains credentials and/or
  parameters for authentication with an identity service. When a profile is
  about to create an object, it will use the data stored here to establish a
  connection to a service;
- ``spec``: a map of key-value pairs that contains the specification for
  object creation. The content of this property is dictated by the
  corresponding profile type.
- ``metadata``: a map of key-value pairs associated with the profile;
- ``created_at``: the timestamp when the profile was created;
- ``updated_at``: the timestamp when the profile was last updated;

The ``spec`` property is the most important property for a profile. It is
immutable, i.e. the only way to "change" the ``spec`` property is to create a
new profile. By restricting changes to this property, Senlin can do a better
job in managing the object configurations.


Creating A Profile
~~~~~~~~~~~~~~~~~~

When creating a profile using the ``profile_create`` API, a user must provide
the ``name`` and ``spec`` parameters. All other parameters are optional.

The provided ``spec`` map will be validated using the validation logic
provided by the corresponding profile type. If the validation succeeds, the
profile will be created and stored in the database. The Senlin engine returns
the details of the profile as a dict back to Senlin API and eventually to the
requesting user. If the validation fails, the Senlin engine returns an error
message describing the reason of the failure.


Listing Profiles
~~~~~~~~~~~~~~~~

Senlin provides an API for listing all profiles known to the Senlin engine.
When querying the profiles, users can provide any of the following
parameters:

- ``filters``: a map of key-value pairs to filter profiles, where each key
  can be one of the following words and the value(s) are for the Senlin
  engine to match against all profiles.

  - ``name``: profile name for matching;
  - ``type``: profile type for matching;
  - ``metadata``: a string for matching profile metadata.

- ``limit``: an integer that specifies the maximum number of records to be
  returned from the API call;
- ``marker``: a string specifying the UUID of the last seen record; only
  those records that appear after the given value will be returned;
- ``sort``: a string to enforce sorting of the results. It accepts a list of
  known property names of a profile as sorting keys separated by commas.
  Each sorting key can optionally have either ``:asc`` or ``:desc`` appended
  for controlling the sorting direction.
- ``global_project``: a boolean indicating whether profile listing should be
  done in a tenant-safe way. When this value is specified as False (the
  default), only profiles from the current project that match the other
  criteria will be returned. When this value is specified as True, profiles
  matching all other criteria will be returned, no matter in which project a
  profile was created. Only a user with admin privilege is permitted to do a
  global listing.

If there are profiles matching the query criteria, Senlin API returns a list
named ``profiles`` where each entry is a JSON map containing details about a
profile object. Otherwise, an empty list or an error message will be
returned, depending on whether the query was well formed.


Getting A Profile
~~~~~~~~~~~~~~~~~

A user can provide one of the following values in an attempt to retrieve the
details of a specific profile.

- Profile UUID: the query is performed strictly based on the UUID value
  given. This is the most precise query supported in Senlin.
- Profile name: Senlin allows multiple profiles to have the same name. It is
  the user's responsibility to avoid name conflicts if needed. The Senlin
  engine will return a message telling users that multiple profiles were
  found matching this name if the provided name cannot uniquely identify a
  profile.
- Short ID: considering that a UUID is a long string that is not convenient
  to input, Senlin supports a short version of UUIDs for query. The Senlin
  engine will use the provided string as a prefix to attempt a match in the
  database. When the "ID" is long enough to be unique, the details of the
  matching profile are returned, or else Senlin will return an error message
  indicating that multiple profiles were found matching the specified short
  ID.


Updating A Profile
~~~~~~~~~~~~~~~~~~

Once a profile object is created, a user can request its properties to be
updated. Updates to the ``name`` or ``metadata`` properties are applied on
the specified profile object directly. Changing the ``spec`` property of a
profile object is not permitted.


Deleting A Profile
~~~~~~~~~~~~~~~~~~

A user can provide the profile UUID, the profile name or a short ID of a
profile when requesting a profile object to be deleted. The Senlin engine
will check if there are still any clusters or nodes using the specific
profile. Since a profile in use cannot be deleted, if any such clusters or
nodes are found, an error message will be returned to the user.

diff --git a/doc/source/contributor/profile_type.rst b/doc/source/contributor/profile_type.rst
deleted file mode 100644
index a94be0271..000000000
--- a/doc/source/contributor/profile_type.rst
+++ /dev/null
@@ -1,271 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License.
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -============= -Profile Types -============= - -In Senlin, each node is associated with a physical object created by -instantiating a :doc:`profile `. Profiles themselves are objects -instantiated from "profile types". In other words, a profile type provides the -specification for creating profiles while a profile can be used to create -multiple homogeneous objects. - -Profile type implementations are managed as plugins. Users can use the -built-in profile types directly and they can provide their own implementation -of new profile types. The plan is to have Senlin engine support dynamical -loading of plugins. Currently, this can be done by adding new -``senlin.profiles`` entry in the ``entry_points`` section in the ``setup.cfg`` -file followed by a reinstall (i.e. ``pip install``) operation. - - -The Base Class 'Profile' -~~~~~~~~~~~~~~~~~~~~~~~~ - -The base class ``Profile`` provides some common logics regarding the following -operations: - -- the initialization of the ``spec_data`` based on the ``spec_schema`` - property and the ``spec`` input. -- the initialization of a basic request context using the Senlin service - credentials. -- the serialization and deserialization of profile object into/from database. -- the validation of data provided through ``spec`` field of the profile; -- the north bound APIs that are provided as class methods, including: - - * ``create_object()``: create an object using logic from the profile type - implementation, with data from the profile object as inputs; - * ``delete_object()``: delete an object using the profile type - implementation; - * ``update_object()``: update an object by invoking operation provided by a - profile type implementation, with data from a different profile object as - inputs; - * ``get_details()``: retrieve object details into a dictionary by invoking - the corresponding method provided by a profile type implementation; - * ``join_cluster()``: a hook API that will be invoked when an object is made - into a member of a cluster; the purpose is to give the profile type - implementation a chance to make changes to the object accordingly; - * ``leave_cluster()``: a hook API that will be invoked when an object is - removed from its current cluster; the purpose is to give the profile type - implementation a chance to make changes to the object accordingly; - * ``recover_object()``: recover an object with operation given by inputs from - the profile object. By default, ``recreate`` is used if no operation is - provided to delete firstly then create the object. - - -Abstract Methods ----------------- - -In addition to the above logics, the base class ``Profile`` also defines some -abstract methods for a profile type implementation to implement. When invoked, -these methods by default return ``NotImplemented``, a special value that -indicates the method is not implemented. 
- -- ``do_create(obj)``: an object creation method for a profile type - implementation to override; -- ``do_delete(obj)``: an object deletion method for a profile type - implementation to override; -- ``do_update(obj, new_profile)``: an object update method for a profile type - implementation to override; -- ``do_check(obj)``: a method that is meant to do a health check over the - provided object; -- ``do_get_details(obj)``: a method that can be overridden so that the caller - can get a dict that contains properties specific to the object; -- ``do_join(obj)``: a method for implementation to override so that profile - type specific changes can be made to the object when object joins a cluster. -- ``do_leave(obj)``: a method for implementation to override so that profile - type specific changes can be made to the object when object leaves its - cluster. -- ``do_recover(obj)``: an object recover method for a profile type - implementation to override. Nova server, for example, overrides the recover - operation by ``REBUILD``. - - -The ``VERSIONS`` Property -------------------------- - -Each profile type class has a ``VERSIONS`` class property that documents the -changes to the profile type. This information is returned when users request -to list all profile types supported. - -The ``VERSIONS`` property is a dict with version numbers as keys. For each -specific version, the value is list of support status changes made to the -profile type. Each change record contains a ``status`` key whose value is one -of ``EXPERIMENTAL``, ``SUPPORTED``, ``DEPRECATED`` or ``UNSUPPORTED``, and a -``since`` key whose value is of format ``yyyy.mm`` where ``yyyy`` and ``mm`` -are the year and month of the release that bears the change to the support -status. For example, the following record indicates that the specific profile -type was introduced in April, 2016 (i.e. version 1.0 release of Senlin) as -an experimental feature; later, in October, 2016 (i.e. version 2.0 release of -Senlin) it has graduated into a mature feature supported by the developer -team. - -.. code:: python - - VERSIONS = { - '1.0': [ - { - "status": "EXPERIMENTAL", - "since": "2016.04" - }, - { - "status": "SUPPORTED", - "since": "2016.10" - } - ] - } - - -The ``context`` Property ------------------------- - -In the ``Profile`` class, there is a special property named ``context``. This -is the data structure containing all necessary information needed when the -profile type implementation wants to authenticate with a cloud platform. -Refer to :doc:`authorization `, Senlin makes use of the trust -mechanism provided by the OpenStack Keystone service. - -The dictionary in this ``context`` property by default contains the credentials -for the Senlin service account. Using the trust built between the requesting -user and the service account, a profile type implementation can authenticate -itself with the backend Keystone service and then interact with the supporting -service like Nova, Heat etc. - -All profile type implementations can include a ``context`` key in their spec, -the default value is an empty dictionary. A user may customize the contents -when creating a profile object by specifying a ``region_name``, for example, -to enable a multi-region cluster deployment. They could even specify a -different ``auth_url`` so that a cluster can be built across OpenStack clouds. - - -Providing New Profile Types -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When released, Senlin provides some built-in profile types. 
However, -developing new profile types for Senlin is not a difficult task. - - -Develop a New Profile Type --------------------------- - -The first step is to create a new file containing a subclass of ``Profile``. -Then you will define the spec schema for the new profile which is a python -dictionary named ``spec_schema``, with property names as keys. For each -property, you will specify its value to be an object of one of the schema -types listed below: - -- ``String``: A string property. -- ``Boolean``: A boolean property. -- ``Integer``: An integer property. -- ``List``: A property containing a list of values. -- ``Map``: A property containing a map of key-value pairs. - -For example: - -.. code:: python - - spec_schema = { - 'name': schema.String('name of object'), - 'capacity': schema.Integer('capacity of object', default=10), - 'shared': schema.Boolean('whether object is shared', default=True) - } - -If a profile property is a ``List``, you can further define the type of -elements in the list, which can be a ``String``, a ``Boolean``, an -``Integer`` or a ``Map``. For example: - -.. code:: python - - spec_schema = { - ... - 'addresses': schema.List( - 'address of object on each network', - schema=schema.String('address on a network') - ), - ... - } - -If a profile property is a ``Map``, you can further define the "schema" of that -map, which itself is another Python dictionary containing property -definitions. For example: - -.. code:: python - - spec_schema = { - ... - 'dimension': schema.Map( - 'dimension of object', - schema={ - 'length': schema.Integer('length of object'), - 'width': schema.Integer('width of object') - } - ) - ... - } - - -By default, a property is not required. If a property has to be provided, you -can specify ``required=True`` in the property type constructor. For example: - -.. code:: python - - spec_schema = { - ... - 'name_length': schema.Integer('length of name', required=True) - ... - } - -A property can have a default value when no value is specified. If a property -has a default value, you don't need to specify it is required. For example: - -.. code:: python - - spec_schema = { - ... - 'min_size': schema.Integer('minimum size of object', default=0) - ... - } - -After the properties are defined, you can continue to work on overriding the -abstract methods inherited from the base ``Profile`` type as appropriate. - - -Registering a New Profile Type ------------------------------- - -For Senlin to make use of the new profile type you have just developed, you -will register it to Senlin service. Currently, this is done through a manual -process. In future, Senlin will provide dynamical loading support to profile -type plugins. - -To register a new profile type, you will add a line to the ``setup.cfg`` file -that can be found at the root directory of Senlin code base. For example: - -:: - - [entry_points] - senlin.profiles = - os.heat.stack-1.0 = senlin.profiles.os.heat.stack:StackProfile - os.nova.server-1.0 = senlin.profiles.os.nova.server:ServerProfile - my.cool.profile-1.0 = : - -Finally, save that file and do a reinstall of the Senlin service, followed by -a restart of the ``senlin-engine`` process. - -:: - - $ sudo pip install -e . - -Now, when you do a :command:`openstack cluster profile type list`, you will -see your profile type listed along with other existing profile types. 
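Putting the steps above together, a minimal, purely illustrative profile type
might look like the sketch below. The import paths and the stub body of
``do_create`` are assumptions, not verified APIs; real implementations live
under :file:`senlin/profiles`.

.. code:: python

    from senlin.common import schema   # assumed import path
    from senlin.profiles import base   # assumed import path


    class CoolServerProfile(base.Profile):
        """A hypothetical profile type, for illustration only."""

        VERSIONS = {
            '1.0': [{'status': 'EXPERIMENTAL', 'since': '2016.04'}]
        }

        spec_schema = {
            'name': schema.String('name of object', required=True),
            'capacity': schema.Integer('capacity of object', default=10),
        }

        def do_create(self, obj):
            # A real implementation would call the backing service here
            # to create the physical object and return its ID.
            return 'fake-physical-id'

Such a class would then be registered through a ``senlin.profiles`` entry
point, e.g. ``my.cool.server-1.0 = <module path>:CoolServerProfile``, as
described above.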
diff --git a/doc/source/contributor/receiver.rst b/doc/source/contributor/receiver.rst
deleted file mode 100644
index 99116451f..000000000
--- a/doc/source/contributor/receiver.rst
+++ /dev/null
@@ -1,226 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.

========
Receiver
========

Concept
~~~~~~~

A :term:`Receiver` is an abstract resource created in the Senlin engine to
handle operation automation. You can create a receiver to trigger a specific
action on a cluster on behalf of a user when some external alarms or events
are fired.

A receiver can be of different types. The ``type`` of a receiver is specified
when it is created. Currently, two receiver types are supported: ``webhook``
and ``message``. For a ``webhook`` receiver, a :term:`Webhook` URI is
generated for users or programs to trigger a cluster action by sending an
HTTP POST request. For a ``message`` receiver, a Zaqar queue is created for
users or programs to trigger a cluster action by sending a message.

A receiver encapsulates the information needed for triggering an action.
This information may include:

* ``actor``: the credential of a user on whose behalf the action will be
  triggered. This is usually the user who created the receiver, but it can
  be any other valid user explicitly specified when the receiver is created.
* ``cluster_id``: the ID of the targeted cluster. It is required only for
  ``webhook`` receivers.
* ``action``: the name of an action that is applicable on a cluster. It is
  required only for ``webhook`` receivers.
* ``params``: a dictionary feeding argument values (if any) to the action.
  It is optional for all types of receivers.

In the long term, Senlin may support user-defined actions where ``action``
will be interpreted as the UUID or name of a user-defined action.


Creating a Receiver
~~~~~~~~~~~~~~~~~~~

Creating a webhook receiver
---------------------------

When a user requests the creation of a webhook receiver by invoking the
:program:`openstack` command, the request comes with at least three
parameters: the receiver type, which should be ``webhook``, the targeted
cluster, and the intended action to invoke when the receiver is triggered.
Optionally, the user can provide some additional parameters to use and/or
the credentials of a different user.

When the Senlin API service receives the request, it does three things:

* Validating the request and rejecting it if any one of the following
  conditions is met:

  - the receiver type specified is not supported;
  - the targeted cluster cannot be found;
  - the targeted cluster is not owned by the requester and the requester
    does not have an "``admin``" role in the project;
  - the provided action is not applicable on a cluster.

* Creating a receiver object that contains all necessary information that
  will be used to trigger the specified action on the specified cluster.

* Creating a "channel" which contains information users can use to trigger
  a cluster action.
For the ``webhook`` receiver, this is a URL stored in - the ``alarm_url`` field and it looks like:: - - http://{host:port}/v1/webhooks/{webhook_id}/trigger?V=2 - - **NOTE**: The ``V=2`` above is used to encode the current webhook triggering - protocol. When the protocol changes in future, the value will be changed - accordingly. - -Finally, Senlin engine returns a dictionary containing the properties of the -receiver object. - -Creating a message receiver ---------------------------- - -When a user requests to create a message receiver by invoking :program:`openstack` -command, the receiver type ``message`` is the only parameter need to be specified. - -When the Senlin API service receives the request, it does the following things: - -* Validating the request and rejecting it if the receiver type specified is not - supported; - -* Creating a receiver object whose cluster_id and action properties are `None`; - -* Creating a "channel" which contains information users can use to trigger - a cluster action. For a ``message`` receiver, the following steps are - followed: - - - Creating a Zaqar queue whose name has the ``senlin-receiver-`` prefix. - - Building a trust between the requester (trustor) and the Zaqar trustee - user (trustee) if this trust relationship has not been created yet. - The ``trust_id`` will be used to create message subscriptions in the next - step. - - Creating a Zaqar subscription targeting on the queue just created and - specifying the HTTP subscriber to the following URL:: - - http://{host:port}/v1/v1/receivers/{receiver_id}/notify - - - Storing the name of queue into the ``queue_name`` field of the receiver's - channel. - -Finally, Senlin engine returns a dictionary containing the properties of the -receiver object. - - -Triggering a Receiver -~~~~~~~~~~~~~~~~~~~~~ - -Different types of receivers are triggered in different ways. For example, a -``webhook`` receiver is triggered via the ``alarm_url`` channel; a message -queue receiver can be triggered via messages delivered in a shared queue. - - -Triggering a Webhook --------------------- - -When triggering a webhook, a user or a software sends a ``POST`` request to -the receiver's ``alarm_url`` channel, which is a specially encoded URL. This -request is first processed by the ``webhook`` middleware before arriving at -the Senlin API service. - -The ``webhook`` middleware checks this request and parses the format of the -request URL. The middleware attempts to find the receiver record from Senlin -database and see if the named receiver does exist. If the receiver is found, -it then tries to load the saved credentials. An error code 404 will be -returned if the receiver is not found. - -After having retrieved the credentials, the middleware will proceed to get a -Keystone token using credentials combined with Senlin service account info. -Using this token, the triggering request can proceed along the pipeline of -middlewares. An exception will be thrown if the authentication operation fails. - -When the senlin engine service receives the webhook triggering request it -creates an action based on the information stored in the receiver object. -The newly created action is then dispatched and scheduled by a scheduler to -perform the expected operation. - -Triggering a Message Receiver ------------------------------ - -When triggering a message receiver, a user or a software needs to send -message(s) to the Zaqar queue whose name can be found from the channel data of -the receiver. 
Then the Zaqar service will notify the Senlin service for the -message(s) by sending a HTTP POST request to the Senlin subscriber URL. -Note: this POST request is sent using the Zaqar trustee user credential -and the ``trust_id`` defined in the subscriber. Therefore, Senlin will -recognize the requester as the receiver owner rather than the Zaqar service -user. - -Then Senlin API then receives this POST request, parses the authentication -information and then makes a ``receiver_notify`` RPC call to the senlin engine. - -The Senlin engine receives the RPC call, claims message(s) from Zaqar and then -builds action(s) based on payload contained in the message body. A message will -be ignored if any one of the following conditions is met: - - - the ``cluster`` or the ``action`` field cannot be found in message body; - - the targeted cluster cannot be found; - - the targeted cluster is not owned by the receiver owner and the receiver - owner does not have "``admin``" role in the project; - - the provided action is not applicable on a cluster. - -Then those newly created action(s) will be scheduled to run to perform the -expected operation. - -Credentials -~~~~~~~~~~~ - -Webhook Receiver ----------------- - -When requesting to create a ``webhook`` receiver, the requester can choose to -provide some credentials by specifying the ``actor`` property of the receiver. -This information will be used for invoking the webhook in the future. There -are several options to provide these credentials. - -If the ``credentials`` to use is explicitly specified, Senlin will save it in -the receiver DB record. When the webhook is invoked later, the saved -credentials will be used for authentication with Keystone. Senlin engine -won't check if the provided credentials actually works when creating the -receiver. The check is postponed to the moment when the receiver is triggered. - -If the ``credentials`` to use is not explicitly provided, Senlin will assume -that the receiver will be triggered in the future using the requester's -credential. To make sure the future authentication succeeds, Senlin engine -will extract the ``user`` ID from the invoking context and create a trust -between the user and the ``senlin`` service account, just like the way how -Senlin deals with other operations. - -The requester must be either the owner of the targeted cluster or he/she has -the ``admin`` role in the project. This is enforced by the policy middleware. -If the requester is the ``admin`` of the project, Senlin engine will use the -cluster owner's credentials (i.e. a trust with the Senlin user in this case). - - -Message Receiver ----------------- - -When requesting to create a ``message`` receiver, the requester does not need -to provide any extra credentials. However, to enable token based authentication -for Zaqar message notifications, Zaqar trustee user information like -``auth_type``, ``auth_url``, ``username``, ``password``, ``project_name``, -``user_domain_name``, ``project_domain_name``, etc. must be configured in the -Senlin configuration file. By default, Zaqar trustee user is the same as Zaqar -service user, for example "zaqar". However, operators are also allowed to -specify other dedicated user as Zaqar trustee user for message notifying. -Therefore, please ensure Zaqar trustee user information defined in senlin.conf -are identical to the ones defined in zaqar.conf. 
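As a concrete illustration of the webhook flow described earlier, the snippet
below triggers a ``webhook`` receiver from Python. Only the URL format comes
from the receiver's channel description above; the host, port and webhook ID
are placeholders, and passing extra action parameters in the request body is
an assumption made for this sketch.

.. code:: python

    import requests

    # The alarm_url channel returned at receiver creation time; replace
    # the placeholders with values from your deployment.
    alarm_url = ('http://<senlin-api-host>:<port>/v1/webhooks/'
                 '<WEBHOOK_ID>/trigger?V=2')

    # Hypothetical extra parameters for the triggered action.
    resp = requests.post(alarm_url, json={'params': {'count': 2}})
    print(resp.status_code)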
diff --git a/doc/source/contributor/reviews.rst b/doc/source/contributor/reviews.rst
deleted file mode 100644
index 02af38aac..000000000
--- a/doc/source/contributor/reviews.rst
+++ /dev/null
@@ -1,42 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.

=======
Reviews
=======

About Global Requirements
~~~~~~~~~~~~~~~~~~~~~~~~~

When reviewing patches proposed by `OpenStack Proposal Bot`, we often quickly
approve them if the patch successfully passed the gate jobs. However, we
should realize that these patches may contain improvements or radical changes
to the packages Senlin imports.

A more appropriate workflow would be to check the version changes proposed in
such patches and examine the git log of each particular package. If there are
significant changes that may simplify the Senlin code base, we should file at
least a TODO item that writes down the changes needed for Senlin to adapt its
code to the new package.


About Trivial Changes
~~~~~~~~~~~~~~~~~~~~~

There are always disagreements across the community about trivial changes
such as grammar fixes or spelling corrections in comments. These changes are
in general okay to get merged, BUT our core reviewers should be aware that
this behavior is not encouraged. When we notice such behavior from some
developers, it is our responsibility to guide them to submit more useful
patches. We are not supposed to reject such changes as a punishment or
anything like that. We are about building great software with a great team.

diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst
deleted file mode 100644
index f5fb8a1bc..000000000
--- a/doc/source/contributor/testing.rst
+++ /dev/null
@@ -1,338 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


==============
Senlin testing
==============

Overview of Testing
~~~~~~~~~~~~~~~~~~~

The Senlin project currently has five different types of testing facilities
in place for developers to perform different kinds of tests:

- *Unit Tests*: These are source code level tests that verify that the
  classes and methods behave as implemented. Once implemented, these tests
  are also used to guarantee that code behavior won't be changed
  accidentally by other patches.
- *API Tests*: These tests treat the *senlin-api* and the *senlin-engine* as
  black boxes. The test cases focus more on the API surface rather than how
  each API is implemented.
-  Once implemented, these tests help ensure that the user-visible service
-  interface doesn't change without a good reason.
-- *Functional Tests*: These tests also treat the *senlin-api* and the
-  *senlin-engine* as black boxes. They focus more on the user-perceivable
-  service behavior. Most tests are anticipated to test a particular "story"
-  and verify that the *senlin-engine* always behaves consistently.
-- *Integration Tests*: These are the tests that integrate senlin with other
-  OpenStack services and verify the senlin service can perform its
-  operations correctly when interacting with other services.
-- *Stress Tests*: These are tests for measuring the performance of the
-  *senlin-api* and *senlin-engine* under different workloads.
-
-
-Cloud Backends
-~~~~~~~~~~~~~~
-
-The senlin server is shipped with two collections of "cloud backends": one
-for interacting with a real OpenStack deployment, the other for running
-complex tests including API tests, functional tests and stress tests. The
-first cloud backend is referred to as '`openstack`' and the second is
-referred to as '`openstack_test`'. While the `openstack` cloud backend
-contains full-featured drivers for senlin to talk to the supported
-OpenStack services, the `openstack_test` backend contains some "dummy"
-drivers that return fake responses for service requests. The
-`openstack_test` driver is located in the :file:`senlin/tests/drivers`
-subdirectory. It is provided to facilitate tests of the senlin service
-itself without involving any other OpenStack services. Several types of
-tests can benefit from these "dummy" drivers because 1) they can save
-developers a lot of time on debugging complex issues when interacting with
-other OpenStack services, and 2) they make running those types of tests
-much easier and quicker.
-
-Note that "Integration Tests" are designed for senlin to interact with real
-services, so we should use the `openstack` backend rather than the
-`openstack_test` backend.
-
-To configure the backend to use before running tests, you can check the
-`[DEFAULT]` section in the configuration file
-:file:`/etc/senlin/senlin.conf`.
-
-::
-
-    [DEFAULT]
-    cloud_backend = openstack_test  # use this for api, functional tests;
-                                    # or 'openstack' for production
-                                    # environments and integration tests.
-
-
-Unit Tests
-~~~~~~~~~~
-
-All unit tests are to be placed in the :file:`senlin/tests/unit`
-sub-directory. Test cases are organized by the targeted subsystems/modules.
-Each subsystem directory must contain a separate blank __init__.py for test
-discovery to function properly.
-
-An example directory structure::
-
-    senlin
-     `- tests
-         `- unit
-             |-- db
-             |   |-- __init__.py
-             |   |-- test_cluster_api.py
-             |   `-- test_node_api.py
-             |-- engine
-             |   |-- __init__.py
-             |   |-- test_clusters.py
-             |   `-- test_nodes.py
-             |-- __init__.py
-             `-- test_utils.py
-
-
-Writing a Unit Test
--------------------
-
-The *os-testr* software (see: https://pypi.org/project/os-testr/) is used to
-find and run tests, parallelize their runs, and record timing/results.
-
-If new dependencies are introduced when developing a test, the
-`test-requirements.txt` file needs to be updated so that the virtual
-environment will be able to successfully execute all tests.
-
-The `test-requirements.txt` file needs to be synchronized with the
-openstack/global-requirements project. Developers should try to avoid
-introducing additional package dependencies unless forced to.
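-To make the directory and discovery rules above concrete, the following is
-a minimal sketch of what a unit test module under :file:`senlin/tests/unit`
-might look like. The function under test is a stand-in defined inline so
-the example stays self-contained; real senlin tests exercise senlin modules
-and usually derive from shared base classes rather than ``testtools``
-directly.
-
-.. code-block:: python
-
-   # File: senlin/tests/unit/test_utils.py (hypothetical example)
-   import testtools
-
-
-   def is_positive(value):
-       # Stand-in for a real senlin utility function, included only to
-       # keep this example runnable on its own.
-       return value > 0
-
-
-   class TestIsPositive(testtools.TestCase):
-
-       def test_positive_value(self):
-           self.assertTrue(is_positive(1))
-
-       def test_non_positive_value(self):
-           self.assertFalse(is_positive(0))
-           self.assertFalse(is_positive(-3))
-
-A module like this is picked up automatically by the test runner, e.g. via
-``python -m testtools.run senlin.tests.unit.test_utils``, as long as every
-parent directory carries a blank ``__init__.py``.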
-
-
-Running Unit Tests
-------------------
-
-Senlin uses `tox` for running unit tests, as practiced by many other
-OpenStack projects::
-
-    $ tox
-
-By default, this will run the unit test suite with Python 2.7 as well as
-the PEP8/HACKING style checks. To run only one type of test you can
-explicitly provide `tox` with the test environment to use::
-
-    $ tox -e py27  # test suite on python 2.7
-    $ tox -e pep8  # run full source code checker
-
-To run only a subset of tests, you can provide `tox` with a regex
-argument::
-
-    $ tox -e py27 -- -r ClusterTest
-
-To use a debugger like `pdb` during a test run, you have to run the tests
-directly with another, non-concurrent test runner instead of `testr`.
-That also presumes that you have a virtual env with all senlin dependencies
-installed and configured.
-
-A more convenient way to run a specific test is to name the unit test
-directly, as shown below::
-
-    $ python -m testtools.run senlin.tests.unit.db.test_cluster_api
-
-This command, however, does not use the dependent packages from a
-particular virtual environment as the `tox` command does; it uses the
-system-wide Python package repository when running the tests.
-
-
-API Tests
-~~~~~~~~~
-
-Senlin API test cases are written based on the *tempest* framework (see:
-`tempest_overview`_). Test cases are developed using the Tempest Plugin
-Interface (see: `tempest_plugin`_).
-
-
-Writing an API Test Case
-------------------------
-
-API tests are hosted in the `senlin-tempest-plugin` project. When new APIs
-are added or existing APIs are changed, an API test case should be added to
-the :file:`senlin_tempest_plugin/tests/api` sub-directory, based on the
-resources impacted by the change.
-
-Each test case should derive from the class
-:class:`senlin_tempest_plugin.tests.api.base.BaseSenlinAPITest`. Positive
-test cases should be separated from negative ones. We don't encourage
-combining more than one test case into a single method, unless there is an
-obvious reason.
-
-To improve the readability of the test cases, Senlin provides a utility
-module that can be leveraged: :file:`senlin_tempest_plugin/common/utils.py`.
-
-
-Running API Tests
------------------
-
-Senlin API tests use fake OpenStack drivers to improve the throughput of
-test execution. This is because in API tests, we don't care about the
-details of how the *senlin-engine* interacts with other services. We care
-more about whether the APIs succeed in an expected way or fail in a
-predictable manner.
-
-Although the senlin engine is talking to fake drivers, the test cases still
-need to communicate with the senlin API service as they would in a real
-deployment. That means you will have to export your OpenStack credentials
-before running the tests. For example, you will source the :file:`openrc`
-file when using a devstack environment::
-
-    $ . $HOME/devstack/openrc
-
-This will ensure you have environment variables such as ``OS_AUTH_URL``
-and ``OS_USERNAME`` properly set and exported. The next step is to enter
-the :file:`tempest` directory and run the tests there::
-
-    $ cd /opt/stack/tempest
-    $ nosetests -v -- senlin
-
-To run a single test case, you can specify the test case name.
-For example::
-
-    $ cd /opt/stack/tempest
-    $ nosetests -v -- \
-      senlin_tempest_plugin.tests.api.clusters.test_cluster_create
-
-If you prefer running API tests in a virtual environment, you can simply
-use the following command::
-
-    $ cd /opt/stack/senlin
-    $ tox -e api
-
-
-Functional Tests
-~~~~~~~~~~~~~~~~
-
-Similar to the API tests, senlin functional tests are also developed based
-on the *tempest* framework. Test cases are written using the Tempest Plugin
-Interface (see: `tempest_plugin`_).
-
-.. _`tempest_overview`: https://docs.openstack.org/tempest/latest/
-.. _`tempest_plugin`: https://docs.openstack.org/tempest/latest/plugin
-
-
-Writing Functional Tests
-------------------------
-
-Functional tests are hosted in the `senlin-tempest-plugin` project. There
-is currently a limited collection of functional test cases, which can be
-found under the :file:`senlin_tempest_plugin/tests/functional/`
-subdirectory. In the future, we may add more test cases when needed. The
-above subdirectory will remain the home of newly added functional tests.
-
-When writing functional tests, it is highly desirable that each test case
-is designed for a specific use case or story line.
-
-
-Running Functional Tests
-------------------------
-
-Similar to API tests, you will need to export your OpenStack credentials
-before running any functional tests.
-
-The most straightforward way to run functional tests is to use the virtual
-environment defined in the :file:`tox.ini` file, that is::
-
-    $ cd /opt/stack/senlin
-    $ tox -e functional
-
-If you prefer running a particular functional test case, you can do the
-following as well::
-
-    $ cd /opt/stack/senlin
-    $ python -m testtools.run senlin_tempest_plugin.tests.functional.test_cluster_basic
-
-
-Integration Tests
-~~~~~~~~~~~~~~~~~
-
-Integration tests are basically another flavor of functional tests. The
-only difference from functional tests is that integration tests use real
-drivers, so the *senlin-engine* is talking to real services.
-
-
-Writing Integration Tests
--------------------------
-
-Integration tests are hosted in the `senlin-tempest-plugin` project.
-Integration tests are designed to be run at the Gerrit gate to ensure that
-changes to senlin code won't break its interactions with other (backend)
-services. Since the OpenStack gate infrastructure is a shared resource pool
-for all OpenStack projects, we are supposed to be very careful when adding
-new test cases. The test cases added should focus on the interaction
-between senlin and other services rather than anything else.
-
-All integration test cases are to be placed under the subdirectory
-:file:`senlin_tempest_plugin/tests/integration`. Test cases are expected to
-be organized into a small number of story lines that can exercise as many
-interactions between senlin and backend services as possible.
-
-Each "story line" should be organized into a separate class module that
-inherits from the ``BaseSenlinIntegrationTest`` class which can be found in
-the :file:`senlin_tempest_plugin/tests/integration/base.py` file. Each test
-case should be annotated with a ``decorators.attr`` annotator and an
-idempotent ID as shown below:
-
-.. code-block:: python
-
-   from tempest.lib import decorators
-
-   from senlin_tempest_plugin.tests.integration import base
-
-
-   class MyIntegrationTest(base.BaseSenlinIntegrationTest):
-
-       @decorators.attr(type=['integration'])
-       @decorators.idempotent_id('')
-       def test_a_sad_story(self):
-           # Test logic goes here
-           # ...
- - -Running Integration Tests -------------------------- - -The integration tests are designed to be executed at Gerrit gate. However, you -can still run them locally in your development environment, i.e. a devstack -installation. - -To run integration tests, you will need to configure *tempest* accounts by -editing the :file:`/etc/tempest/accounts.yaml` file. For each entry of the -tempest account, you will need to provide values for ``username``, -``tenant_name``, ``password`` at least. For example: - -.. code-block:: yaml - - - username: 'demo' - tenant_name: 'demo' - password: 'secretee' - -After this is configured, you can run a specific test case using the following -command: - -.. code-block:: console - - $ cd /opt/stack/senlin - $ python -m testtools.run \ - senlin_tempest_plugin.tests.integration.test_nova_server_cluster - - -Writing Stress Test Cases -------------------------- - - - - -Running Stress Tests --------------------- - - diff --git a/doc/source/ext/__init__.py b/doc/source/ext/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/doc/source/ext/resources.py b/doc/source/ext/resources.py deleted file mode 100644 index b02800005..000000000 --- a/doc/source/ext/resources.py +++ /dev/null @@ -1,291 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -*- coding: utf-8 -*- - -from docutils import nodes -from docutils.parsers import rst -from docutils.parsers.rst import directives -from functools import cmp_to_key -from oslo_utils import importutils -from sphinx.util import logging - -from senlin.common import schema - -LOG = logging.getLogger(__name__) - - -class SchemaDirective(rst.Directive): - required_arguments = 0 - optional_arguments = 0 - final_argument_whitespace = True - option_spec = {'package': directives.unchanged} - has_content = False - add_index = True - section_title = 'Spec' - properties_only = False - - def run(self): - """Build doctree nodes consisting for the specified schema class - - :returns: doctree node list - """ - - # gives you access to the options of the directive - options = self.options - - content = [] - - # read in package class - obj = importutils.import_class(options['package']) - - # skip other spec properties if properties_only is True - if not self.properties_only: - section = self._create_section(content, 'spec', - title=self.section_title) - - # create version section - version_section = self._create_section(section, 'version', - title='Latest Version') - field = nodes.line('', obj.VERSION) - version_section.append(field) - - # build versions table - version_tbody = self._build_table( - section, 'Available Versions', - ['Version', 'Status', 'Supported Since']) - sorted_versions = sorted(obj.VERSIONS.items()) - for version, support_status in sorted_versions: - for support in support_status: - cells = [version] - sorted_support = sorted(support.items(), reverse=True) - cells += [x[1] for x in sorted_support] - self._create_table_row(cells, version_tbody) - - # create applicable profile types - profile_type_description = ('This policy is designed to handle ' - 'the following profile types:') - profile_type_section = self._create_section( - section, 'profile_types', title='Applicable Profile Types') - field = nodes.line('', profile_type_description) - profile_type_section.append(field) - for profile_type in obj.PROFILE_TYPE: - profile_type_section += self._create_list_item(profile_type) - - # create actions handled - policy_trigger_description = ('This policy is triggered by the ' - 'following actions during the ' - 'respective phases:') - target_tbody = self._build_table( - section, 'Policy Triggers', - ['Action', 'Phase'], - policy_trigger_description - ) - sorted_targets = sorted(obj.TARGET, key=lambda tup: tup[1]) - for phase, action in sorted_targets: - cells = [action, phase] - self._create_table_row(cells, target_tbody) - - # build properties - properties_section = self._create_section(section, 'properties', - title='Properties') - else: - properties_section = content - - sorted_schema = sorted(obj.properties_schema.items(), - key=cmp_to_key(self._sort_by_type)) - for k, v in sorted_schema: - self._build_properties(k, v, properties_section) - - # we return the result - return content - - def _create_section(self, parent, sectionid, title=None, term=None): - """Create a new section - - :returns: If term is specified, returns a definition node contained - within the newly created section. Otherwise return the newly created - section node. 
- """ - - idb = nodes.make_id(sectionid) - section = nodes.section(ids=[idb]) - parent.append(section) - - if term: - if term != '**': - section.append(nodes.term('', term)) - - definition = nodes.definition() - section.append(definition) - - return definition - - if title: - section.append(nodes.title('', title)) - - return section - - def _create_list_item(self, str): - """Creates a new list item - - :returns: List item node - """ - para = nodes.paragraph() - para += nodes.strong('', str) - - item = nodes.list_item() - item += para - - return item - - def _create_def_list(self, parent): - """Creates a definition list - - :returns: Definition list node - """ - - definition_list = nodes.definition_list() - parent.append(definition_list) - - return definition_list - - def _sort_by_type(self, x, y): - """Sort two keys so that map and list types are ordered last.""" - - x_key, x_value = x - y_key, y_value = y - - # if both values are map or list, sort by their keys - if ((isinstance(x_value, schema.Map) or - isinstance(x_value, schema.List)) and - (isinstance(y_value, schema.Map) or - isinstance(y_value, schema.List))): - return (x_key > y_key) - (x_key < y_key) - - # show simple types before maps or list - if (isinstance(x_value, schema.Map) or - isinstance(x_value, schema.List)): - return 1 - - if (isinstance(y_value, schema.Map) or - isinstance(y_value, schema.List)): - return -1 - - return (x_key > y_key) - (x_key < y_key) - - def _create_table_row(self, cells, parent): - """Creates a table row for cell in cells - - :returns: Row node - """ - - row = nodes.row() - parent.append(row) - - for c in cells: - entry = nodes.entry() - row += entry - entry += nodes.literal(text=c) - - return row - - def _build_table(self, section, title, headers, description=None): - """Creates a table with given title, headers and description - - :returns: Table body node - """ - - table_section = self._create_section(section, title, title=title) - - if description: - field = nodes.line('', description) - table_section.append(field) - - table = nodes.table() - tgroup = nodes.tgroup(len(headers)) - table += tgroup - - table_section.append(table) - - for _ in headers: - tgroup.append(nodes.colspec(colwidth=1)) - - # create header - thead = nodes.thead() - tgroup += thead - self._create_table_row(headers, thead) - - tbody = nodes.tbody() - tgroup += tbody - - # create body consisting of targets - tbody = nodes.tbody() - tgroup += tbody - - return tbody - - def _build_properties(self, k, v, definition): - """Build schema property documentation - - :returns: None - """ - - if isinstance(v, schema.Map): - newdef = self._create_section(definition, k, term=k) - - if v.schema is None: - # if it's a map for arbritary values, only include description - field = nodes.line('', v.description) - newdef.append(field) - return - - newdeflist = self._create_def_list(newdef) - - sorted_schema = sorted(v.schema.items(), - key=cmp_to_key(self._sort_by_type)) - for key, value in sorted_schema: - self._build_properties(key, value, newdeflist) - elif isinstance(v, schema.List): - newdef = self._create_section(definition, k, term=k) - - # identify next section as list properties - field = nodes.line() - emph = nodes.emphasis('', 'List properties:') - field.append(emph) - newdef.append(field) - - newdeflist = self._create_def_list(newdef) - - self._build_properties('**', v.schema['*'], newdeflist) - else: - newdef = self._create_section(definition, k, term=k) - if 'description' in v: - field = nodes.line('', v['description']) - 
-                newdef.append(field)
-            else:
-                field = nodes.line('', '++')
-                newdef.append(field)
-
-
-class SchemaProperties(SchemaDirective):
-    properties_only = True
-
-
-class SchemaSpec(SchemaDirective):
-    section_title = 'Spec'
-    properties_only = False
-
-
-def setup(app):
-    app.add_directive('schemaprops', SchemaProperties)
-    app.add_directive('schemaspec', SchemaSpec)
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 100644
index d4c30119a..000000000
--- a/doc/source/index.rst
+++ /dev/null
@@ -1,226 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-====================================
-Welcome to the Senlin documentation!
-====================================
-
-1 Introduction
-~~~~~~~~~~~~~~
-
-Senlin is a service to create and manage clusters (:term:`Cluster`) of
-multiple cloud resources. Senlin provides an OpenStack-native REST API; an
-AWS AutoScaling-compatible Query API is planned.
-
-.. toctree::
-   :maxdepth: 1
-
-   overview
-   install/index
-   configuration/index
-
-2 Tutorial
-~~~~~~~~~~
-
-This tutorial walks you through the Senlin features step-by-step. For more
-details, please check the :ref:`user-references` section.
-
-.. toctree::
-   :maxdepth: 1
-
-   tutorial/basics
-   tutorial/policies
-   tutorial/receivers
-   tutorial/autoscaling
-
-.. _user-references:
-
-3 User References
-~~~~~~~~~~~~~~~~~
-
-This section provides detailed documentation for the concepts and the
-built-in policy types.
-
-3.1 Basic Concepts
-------------------
-
-.. toctree::
-   :maxdepth: 1
-
-   user/profile_types
-   user/profiles
-   user/clusters
-   user/nodes
-   user/membership
-   user/policy_types
-   user/policies
-   user/bindings
-   user/receivers
-   user/actions
-   user/events
-
-3.2 Built-in Policy Types
--------------------------
-
-The senlin service is released with some built-in policy types that target
-some common use cases. You can develop and deploy your own policy types by
-following the instructions in the :ref:`developer-guide` section.
-
-The following is a list of builtin policy types:
-
-.. toctree::
-   :maxdepth: 1
-
-   user/policy_types/affinity
-   user/policy_types/batch
-   user/policy_types/deletion
-   user/policy_types/health
-   user/policy_types/load_balancing
-   user/policy_types/scaling
-   user/policy_types/region_placement
-   user/policy_types/zone_placement
-
-3.3 Built-in Profile Types
---------------------------
-
-The senlin service is released with some built-in profile types that target
-some common use cases. You can develop and deploy your own profile types by
-following the instructions in the :ref:`developer-guide` section.
-
-The following is a list of builtin profile types:
-
-.. toctree::
-   :maxdepth: 1
-
-   user/profile_types/nova
-   user/profile_types/stack
-   user/profile_types/docker
-
-4 Usage Scenarios
-~~~~~~~~~~~~~~~~~
-
-This section provides some guides for typical usage scenarios. More
-scenarios are to be added.
-
-4.1 Managing Node Affinity
---------------------------
-
-Senlin provides an :doc:`Affinity Policy ` for
-managing node affinity.
This section contains a detailed introduction on how -to use it. - -.. toctree:: - :maxdepth: 1 - - scenarios/affinity - -4.2 Building AutoScaling Clusters ---------------------------------- - -.. toctree:: - :maxdepth: 1 - - scenarios/autoscaling_overview - scenarios/autoscaling_ceilometer - scenarios/autoscaling_heat - - -.. _developer-guide: - -5. Developer's Guide -~~~~~~~~~~~~~~~~~~~~ - -This section targets senlin developers. - -5.1 Understanding the Design ----------------------------- - -.. toctree:: - :maxdepth: 1 - - contributor/api_microversion - contributor/authorization - contributor/profile - contributor/cluster - contributor/node - contributor/policy - contributor/action - contributor/receiver - contributor/testing - contributor/plugin_guide - contributor/osprofiler - -5.2 Built-in Policy Types -------------------------- - -Senlin provides some built-in policy types which can be instantiated and then -attached to your clusters. These policy types are designed to be orthogonal so -that each of them can be used independently. They are also expected to work -in a collaborative way to meet the needs of complicated usage scenarios. - -.. toctree:: - :maxdepth: 1 - - contributor/policies/affinity_v1 - contributor/policies/deletion_v1 - contributor/policies/health_v1 - contributor/policies/load_balance_v1 - contributor/policies/region_v1 - contributor/policies/scaling_v1 - contributor/policies/zone_v1 - -5.3 Reviewing Patches ---------------------- - -There are many general guidelines across the community about code reviews, for -example: - -- `Code review guidelines (wiki)`_ -- `OpenStack developer's guide`_ - -Besides these guidelines, senlin has some additional amendments based on daily -review experiences that should be practiced. - -.. toctree:: - :maxdepth: 1 - - contributor/reviews - -6 Administering Senlin -~~~~~~~~~~~~~~~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - admin/index - - -7 References -~~~~~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - reference/man/index - reference/glossary - reference/api - - -Indices and tables ------------------- - -* :ref:`genindex` -* :ref:`search` - -.. _`Code review guidelines (wiki)`: https://wiki.openstack.org/wiki/CodeReviewGuidelines -.. _`OpenStack developer's guide`: https://docs.openstack.org/infra/manual/developers.html diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst deleted file mode 100644 index 349d5c71f..000000000 --- a/doc/source/install/index.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================= -Installing Senlin -================= - -.. toctree:: - :maxdepth: 2 - - install-devstack.rst - install-source.rst - install-rdo.rst - verify.rst - -This chapter assumes a working setup of OpenStack following the -`OpenStack Installation Tutorial `_. - diff --git a/doc/source/install/install-devstack.rst b/doc/source/install/install-devstack.rst deleted file mode 100644 index a980d83a7..000000000 --- a/doc/source/install/install-devstack.rst +++ /dev/null @@ -1,45 +0,0 @@ -.. 
-    Licensed under the Apache License, Version 2.0 (the "License"); you may
-    not use this file except in compliance with the License. You may obtain
-    a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-    License for the specific language governing permissions and limitations
-    under the License.
-
-.. _install-devstack:
-
-====================
-Install via Devstack
-====================
-
-This is the recommended way to install the Senlin service. Please refer to
-the following detailed instructions.
-
-1. Download DevStack::
-
-    $ git clone https://git.openstack.org/openstack-dev/devstack
-    $ cd devstack
-
-2. Add the following repos as external repositories into your
-   ``local.conf`` file::
-
-    [[local|localrc]]
-    #Enable senlin
-    enable_plugin senlin https://git.openstack.org/openstack/senlin
-    #Enable senlin-dashboard
-    enable_plugin senlin-dashboard https://git.openstack.org/openstack/senlin-dashboard
-
-Optionally, you can add a line ``SENLIN_USE_MOD_WSGI=True`` to the same
-``local.conf`` file if you prefer running the Senlin API service under
-Apache.
-
-3. Run ``./stack.sh``::
-
-    $ ./stack.sh
-
-Note that the Senlin client is also installed when following these
-instructions.
-
-
diff --git a/doc/source/install/install-rdo.rst b/doc/source/install/install-rdo.rst
deleted file mode 100644
index a5e005803..000000000
--- a/doc/source/install/install-rdo.rst
+++ /dev/null
@@ -1,246 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _install-rdo:
-
-===============
-Install via RDO
-===============
-
-This section describes how to install and configure the Senlin service
-for Red Hat Enterprise Linux 7 and CentOS 7.
-
-These instructions apply to the ``pike`` release and later.
-
-Prerequisites
--------------
-
-Before you install and configure Senlin, you must create a
-database, service credentials, and API endpoints. Senlin also
-requires additional information in the Identity service.
-
-1. To create the database, complete these steps:
-
-* Use the database access client to connect to the database
-  server as the ``root`` user:
-
-::
-
-    $ mysql -u root -p
-
-* Create the ``senlin`` database:
-
-::
-
-    CREATE DATABASE senlin DEFAULT CHARACTER SET utf8;
-
-* Grant proper access to the ``senlin`` database:
-
-::
-
-    GRANT ALL ON senlin.* TO 'senlin'@'localhost' \
-      IDENTIFIED BY 'SENLIN_DBPASS';
-    GRANT ALL ON senlin.* TO 'senlin'@'%' \
-      IDENTIFIED BY 'SENLIN_DBPASS';
-
-Replace ``SENLIN_DBPASS`` with a suitable password.
-
-* Exit the database access client.
-
-2. Source the ``admin`` credentials to gain access to
-   admin-only CLI commands:
-
-::
-
-    $ . admin-openrc
-
-3.
To create the service credentials, complete these steps: - -* Create the ``senlin`` user: - -:: - - $openstack user create --project service --password-prompt senlin - User Password: - Repeat User Password: - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | e0353a670a9e496da891347c589539e9 | - | enabled | True | - | id | ca2e175b851943349be29a328cc5e360 | - | name | senlin | - +-----------+----------------------------------+ - -* Add the ``admin`` role to the ``senlin`` user: - -:: - - $ openstack role add --project service --user senlin admin - - .. note:: - - This command provides no output. - -* Create the ``senlin`` service entities: - -:: - - $ openstack service create --name senlin \ - --description "Senlin Clustering Service V1" clustering - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | Senlin Clustering Service V1 | - | enabled | True | - | id | 727841c6f5df4773baa4e8a5ae7d72eb | - | name | senlin | - | type | clustering | - +-------------+----------------------------------+ - -4. Create the senlin service API endpoints: - -:: - - $ openstack endpoint create senlin --region RegionOne \ - public http://controller:8777 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 90485e3442544509849e3c79bf93c15d | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 9130295921b04601a81f95c417b9f113 | - | service_name | senlin | - | service_type | clustering | - | url | http://controller:8777 | - +--------------+----------------------------------+ - - $ openstack endpoint create senlin --region RegionOne \ - admin http://controller:8777 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | d4a9f5a902574479a73e520dd3f93dfb | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 9130295921b04601a81f95c417b9f113 | - | service_name | senlin | - | service_type | clustering | - | url | http://controller:8777 | - +--------------+----------------------------------+ - - $ openstack endpoint create senlin --region RegionOne \ - internal http://controller:8777 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | d119b192857e4760a196ba2b88d20bc6 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 9130295921b04601a81f95c417b9f113 | - | service_name | senlin | - | service_type | clustering | - | url | http://controller:8777 | - +--------------+----------------------------------+ - -Install and configure components --------------------------------- - -.. note:: - - Default configuration files vary by distribution. You might need - to add these sections and options rather than modifying existing - sections and options. Also, an ellipsis (``...``) in the configuration - snippets indicates potential default configuration options that you - should retain. - -1. 
Install the packages: - -:: - - # yum install openstack-senlin-api.noarch \ - openstack-senlin-common.noarch \ - openstack-senlin-conductor.noarch \ - openstack-senlin-engine.noarch \ - openstack-senlin-health-manager.noarch \ - python3-senlinclient.noarch - -2. Edit file :file:`/etc/senlin/senlin.conf` according to your system settings. The most common options to be customized include: - -:: - - [database] - connection = mysql+pymysql://senlin:@127.0.0.1/senlin?charset=utf8 - - [keystone_authtoken] - service_token_roles_required = True - auth_type = password - user_domain_name = Default - project_domain_name = Default - project_name = service - username = senlin - password = - www_authenticate_uri = http:///identity/v3 - auth_url = http:///identity - - [authentication] - auth_url = http://:5000/v3 - service_username = senlin - service_password = - service_project_name = service - - [oslo_messaging_rabbit] - rabbit_userid = - rabbit_hosts = - rabbit_password = - - [oslo_messaging_notifications] - driver = messaging - -For more comprehensive helps on configuration options, please refer to -:doc:`Configuration Options ` documentation. - - -3. Populate the Senlin database: - -:: - - # senlin-manage db_sync - - .. note:: - - Ignore any deprecation messages in this output. - -Finalize installation ---------------------- - -* Start the Senlin services and configure them to start - when the system boots: - -:: - - # systemctl enable openstack-senlin-api.service \ - openstack-senlin-conductor.service \ - openstack-senlin-engine.service \ - openstack-senlin-health-manager.service - # systemctl start openstack-senlin-api.service \ - openstack-senlin-conductor.service \ - openstack-senlin-engine.service \ - openstack-senlin-health-manager.service diff --git a/doc/source/install/install-source.rst b/doc/source/install/install-source.rst deleted file mode 100644 index 7076918e9..000000000 --- a/doc/source/install/install-source.rst +++ /dev/null @@ -1,145 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _install-source: - -============================ -Install from Git Source Code -============================ - -Install Senlin Server ---------------------- - -1. Get Senlin source code from OpenStack git repository. - -:: - - $ cd /opt/stack - $ git clone https://git.openstack.org/openstack/senlin.git - -2. Install Senlin with required packages. - -:: - - $ cd /opt/stack/senlin - $ sudo pip install -e . - -3. Register Senlin clustering service with keystone. - - This can be done using the :command:`setup-service` script under the - :file:`tools` folder. - - **NOTE:** Suppose you have devstack installed under the - :file:`/opt/devstack` folder - -:: - - $ . /opt/devstack/openrc admin admin - $ cd /opt/stack/senlin/tools - $ ./setup-service - -4. Generate configuration file for the Senlin service. 
-
-::
-
-    $ cd /opt/stack/senlin
-    $ tools/gen-config
-    $ sudo mkdir /etc/senlin
-    $ sudo cp etc/senlin/api-paste.ini /etc/senlin
-    $ sudo cp etc/senlin/senlin.conf.sample /etc/senlin/senlin.conf
-
-Edit file :file:`/etc/senlin/senlin.conf` according to your system settings.
-The most common options to be customized include:
-
-::
-
-    [database]
-    connection = mysql+pymysql://senlin:@127.0.0.1/senlin?charset=utf8
-
-    [keystone_authtoken]
-    service_token_roles_required = True
-    auth_type = password
-    user_domain_name = Default
-    project_domain_name = Default
-    project_name = service
-    username = senlin
-    password =
-    www_authenticate_uri = http:///identity/v3
-    auth_url = http:///identity
-
-    [authentication]
-    auth_url = http://:5000/v3
-    service_username = senlin
-    service_password =
-    service_project_name = service
-
-    [oslo_messaging_rabbit]
-    rabbit_userid =
-    rabbit_hosts =
-    rabbit_password =
-
-    [oslo_messaging_notifications]
-    driver = messaging
-
-For more comprehensive help on configuration options, please refer to the
-:doc:`Configuration Options ` documentation.
-
-In case you want to modify the access policies of Senlin, please generate a
-sample policy file, copy it to `/etc/senlin/policy.yaml` and then update it.
-
-::
-
-    $ cd /opt/stack/senlin
-    $ tools/gen-policy
-    $ sudo cp etc/senlin/policy.yaml.sample /etc/senlin/policy.yaml
-
-5. Create Senlin Database.
-
-Create the Senlin database using the :command:`senlin-db-recreate` script
-under the :file:`tools` subdirectory. Before calling the script, you need
-to edit it to customize the password you will use for the ``senlin`` user.
-You need to update this script with the password entered in step 4.
-
-::
-
-    $ cd /opt/stack/senlin/tools
-    $ ./senlin-db-recreate
-
-6. Start the senlin api, conductor, engine and health-manager services.
-
-You may need multiple consoles for the services, i.e. one for each service.
-
-::
-
-    $ senlin-conductor --config-file /etc/senlin/senlin.conf
-    $ senlin-engine --config-file /etc/senlin/senlin.conf
-    $ senlin-health-manager --config-file /etc/senlin/senlin.conf
-    $ senlin-api --config-file /etc/senlin/senlin.conf
-
-Install Senlin Client
----------------------
-
-1. Get the Senlin client code from the OpenStack git repository.
-
-::
-
-    $ cd /opt/stack
-    $ git clone https://git.openstack.org/openstack/python-senlinclient.git
-
-2. Install the senlin client.
-
-::
-
-    $ cd python-senlinclient
-    $ sudo python setup.py install
-
diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst
deleted file mode 100644
index 4f2515bc6..000000000
--- a/doc/source/install/verify.rst
+++ /dev/null
@@ -1,50 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _verify:
-
-========================
-Verify Your Installation
-========================
-
-Verify operation of the Cluster service.
-
-
-.. note::
-
-    Perform these commands on the controller node.
-
-#. Source the ``admin`` tenant credentials:
-
-   .. code-block:: console
-
-      $ . admin-openrc
-
-#. List service components to verify successful launch and
-   registration of each process:
-
-   .. code-block:: console
-
-      $ openstack cluster build info
-      +--------+---------------------+
-      | Field  | Value               |
-      +--------+---------------------+
-      | api    | {                   |
-      |        |   "revision": "1.0" |
-      |        | }                   |
-      | engine | {                   |
-      |        |   "revision": "1.0" |
-      |        | }                   |
-      +--------+---------------------+
-
-You are now ready to begin your journey (a.k.a. adventure) with Senlin.
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
deleted file mode 100644
index 2be2c1d3f..000000000
--- a/doc/source/overview.rst
+++ /dev/null
@@ -1,80 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _guide-overview:
-
-========
-Overview
-========
-
-Senlin is a **clustering service** for OpenStack clouds. It creates and
-operates clusters of homogeneous objects exposed by other OpenStack
-services. The goal is to make orchestration of collections of similar
-objects easier.
-
-Senlin interacts with other OpenStack services so that clusters of
-resources exposed by those services can be created and operated. These
-interactions are mostly done via :term:`profile` plugins. Each profile type
-implementation enables Senlin to create, update and delete a specific type
-of resource.
-
-A :term:`Cluster` can be associated with different :term:`Policy` objects
-that can be checked/enforced at varying enforcement levels. Through service
-APIs, a user can dynamically add a :term:`Node` to and remove a node from a
-cluster, and attach and detach policies, such as *creation policy*,
-*deletion policy*, *load-balancing policy*, *scaling policy*, *health
-policy* etc. Through integration with other OpenStack projects, users can
-manage deployments and orchestration of large-scale resource pools much
-more easily.
-
-Senlin is designed to be capable of managing different types of objects. An
-object's lifecycle is managed using :term:`Profile Type` implementations,
-which are plugins that can be dynamically loaded by the service engine.
-
-Components
-~~~~~~~~~~
-
-The developers are focusing on creating an OpenStack-style project using
-OpenStack design tenets, implemented in Python. We have started with a
-close interaction with the Heat project.
-
-senlinclient
-------------
-
-The :program:`senlinclient` package provides a plugin for the
-openstackclient tool, so you have a command line interface to communicate
-with the :program:`senlin-api` to manage clusters, nodes, profiles,
-policies, actions and events. End developers could also use the Senlin
-REST API directly.
-
-senlin-dashboard
-----------------
-
-The :program:`senlin-dashboard` is a Horizon plugin that provides a UI for
-senlin.
-
-senlin-api
-----------
-
-The :program:`senlin-api` component provides an OpenStack-native REST API
-that processes API requests by sending them to the :program:`senlin-engine`
-over RPC.
-
-senlin-engine
--------------
-
-The :program:`senlin-engine`'s main responsibility is to create and
-orchestrate the clusters, nodes, profiles and policies.
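-As a rough sketch (this is not senlin's actual client code), an RPC
-invocation from an API service to an engine service built on oslo.messaging
-typically looks like the following. The topic and the method name used here
-are illustrative assumptions only.
-
-.. code-block:: python
-
-   from oslo_config import cfg
-   import oslo_messaging as messaging
-
-   # Set up a transport from the service configuration (e.g. RabbitMQ).
-   transport = messaging.get_rpc_transport(cfg.CONF)
-
-   # The target names the topic the engine listens on; both values here
-   # are assumptions for illustration.
-   target = messaging.Target(topic='senlin-engine', version='1.0')
-   client = messaging.RPCClient(transport, target)
-
-   # 'call' blocks until the engine returns a result; 'cast' would be
-   # fire-and-forget. The context dict carries request/auth metadata.
-   ctxt = {'user': 'demo', 'project': 'demo'}
-   result = client.call(ctxt, 'cluster_list', project_safe=True)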
-
-
-Installation
-~~~~~~~~~~~~
-
-You will need to make sure you have a suitable environment for deploying
-Senlin. Please refer to :doc:`Installation ` for detailed
-instructions on setting up an environment to use the Senlin service.
diff --git a/doc/source/reference/api.rst b/doc/source/reference/api.rst
deleted file mode 100644
index 45c84435e..000000000
--- a/doc/source/reference/api.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-API Documentation
------------------
-
-Follow the link below for the Senlin API V1 specification:
-
-- `OpenStack API Complete Reference - Clustering`_
-
-
-
-.. _`OpenStack API Complete Reference - Clustering`: https://docs.openstack.org/api-ref/clustering/
diff --git a/doc/source/reference/glossary.rst b/doc/source/reference/glossary.rst
deleted file mode 100644
index 3eda3b798..000000000
--- a/doc/source/reference/glossary.rst
+++ /dev/null
@@ -1,146 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-========
-Glossary
-========
-
-This section contains the glossary for the Senlin service.
-
-.. glossary::
-   :sorted:
-
-   Action
-     An action is an operation that can be performed on a :term:`Cluster`
-     or a :term:`Node` etc. Different types of objects support different
-     sets of actions. An action is executed by a :term:`Worker` thread when
-     the action becomes READY. Most Senlin APIs create actions in the
-     database for worker threads to execute asynchronously. An action, when
-     executed, will check and enforce the :term:`Policy` objects associated
-     with the cluster. An action can be triggered via a :term:`Receiver`.
-
-   API server
-     The HTTP REST API service for Senlin.
-
-   Cluster
-     A cluster is a group of homogeneous objects (i.e. :term:`Node`
-     objects). A cluster consists of 0 or more nodes and it can be
-     associated with 0 or more :term:`Policy` objects. It is associated
-     with a :term:`Profile Type` when created.
-
-   Dependency
-     The :term:`Action` objects are stored into the database for execution.
-     These actions may have dependencies among them.
-
-   Dispatcher
-     A dispatcher is a processor that takes a Senlin :term:`Action` as
-     input and then converts it into a desired format for storage or
-     further processing.
-
-   Driver
-     A driver is a Senlin internal module that enables the Senlin
-     :term:`Engine` to interact with other :term:`OpenStack` services. The
-     interactions here are usually used to create, destroy and update the
-     objects exposed by those services.
-
-   Engine
-     The daemon that actually performs the operations requested by users.
-     It provides RPC interfaces to RPC clients.
-
-   Environment
-     Used to specify user-provided plugins (:term:`Plugin`) that implement
-     a :term:`Profile Type` or a :term:`Policy Type`. Users can provide
-     plugins that override the default plugins by customizing an
-     environment.
-
-   Event
-     An event is a record left in the Senlin database when something that
-     matters to users has happened. Events can have different criticality
-     levels.
-
-   Index
-     An integer property of a :term:`Node` when it is a member of a
-     :term:`Cluster`. Each node has an auto-generated index value that is
-     unique in the cluster.
-
-   Node
-     A node is an object that belongs to at most one :term:`Cluster`. A
-     node can become an 'orphaned node' when it is not a member of any
-     clusters. All nodes in a cluster must be of the same
-     :term:`Profile Type` as the owning cluster. In general, a node
-     represents a physical object exposed by other OpenStack services. A
-     node has a unique :term:`Index` value scoped to the cluster that owns
-     it.
-
-   Permission
-     A string dictating which user (role or group) has what permissions on
-     a given object (i.e. :term:`Cluster`, :term:`Node`, :term:`Profile`
-     and :term:`Policy` etc.)
-
-   Plugin
-     A plugin is an implementation of a :term:`Policy Type` or
-     :term:`Profile Type` that can be dynamically loaded and registered to
-     the Senlin engine. The Senlin engine comes with a set of builtin
-     plugins. Users can add their own plugins by customizing the
-     :term:`Environment` configuration.
-
-   Policy
-     A policy is a set of rules that can be checked and/or enforced when an
-     :term:`Action` is performed on a :term:`Cluster`. A policy is an
-     instance of a particular :term:`Policy Type`. Users can specify the
-     enforcement level when creating a policy object. Such a policy object
-     can be attached to and detached from a cluster.
-
-   Policy Type
-     A policy type is an abstraction of :term:`Policy` objects. The
-     implementation of a policy type specifies when the policy should be
-     checked and/or enforced, what profile types are supported, and what
-     operations are to be done before, during and after each
-     :term:`Action`. All policy types are provided as Senlin plugins.
-
-   Profile
-     A profile is a mould used for creating objects (i.e. :term:`Node`). A
-     profile is an instance of a :term:`Profile Type` with all required
-     information specified. Each profile has a unique ID. As a guideline, a
-     profile cannot be updated once created. To change a profile, you have
-     to create a new instance.
-
-   Profile Type
-     A profile type is an abstraction of objects that are backed by some
-     :term:`Driver`. The implementation of a profile type calls the
-     driver(s) to create objects that are managed by Senlin. The
-     implementation also serves as a factory that can "produce" objects
-     given a profile. All profile types are provided as Senlin plugins.
-
-   Role
-     A role is a string property that can be assigned to a :term:`Node`.
-     Nodes in the same cluster may assume a role for certain reasons, such
-     as application configuration. The default role for a node is empty.
-
-   OpenStack
-     Open source software for building private and public clouds.
-
-   Receiver
-     A receiver is an abstract resource created in the senlin engine that
-     can be used to hook the engine to some external event/alarm sources. A
-     receiver can be of different types. The most common type is a
-     :term:`Webhook`.
-
-   Webhook
-     A webhook is an encoded URI (Uniform Resource Identifier) used for
-     triggering some operations (e.g. Senlin actions) on some resources.
Such - a webhook URL is the only thing one needs to know to trigger an action on - a cluster. - - Worker - A worker is the thread created and managed by Senlin engine to execute - an :term:`Action` that becomes ready. When the current action completes - (with a success or failure), a worker will check the database to find - another action for execution. diff --git a/doc/source/reference/man/index.rst b/doc/source/reference/man/index.rst deleted file mode 100644 index 092d726ef..000000000 --- a/doc/source/reference/man/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -========= -Man Pages -========= - - -Senlin services -~~~~~~~~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - senlin-conductor - senlin-engine - senlin-health-manager - senlin-api - - -Senlin utilities -~~~~~~~~~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - senlin-manage - senlin-status diff --git a/doc/source/reference/man/senlin-api.rst b/doc/source/reference/man/senlin-api.rst deleted file mode 100644 index da74ed512..000000000 --- a/doc/source/reference/man/senlin-api.rst +++ /dev/null @@ -1,51 +0,0 @@ -========== -senlin-api -========== - -.. program:: senlin-api - -SYNOPSIS -~~~~~~~~ - -``senlin-api [options]`` - -DESCRIPTION -~~~~~~~~~~~ - -senlin-api provides an external REST API to the Senlin service. - -INVENTORY -~~~~~~~~~ - -senlin-api is a WSGI application that exposes an external REST style API to -the Senlin service. senlin-api communicates with senlin-engine using Remote -Procedure Calls (RPC), which is based on AMQP protocol. - -OPTIONS -~~~~~~~ - -.. cmdoption:: --config-file - - Path to a config file to use. Multiple config files can be specified, with - values in later files taking precedence. - - -.. cmdoption:: --config-dir - - Path to a config directory to pull .conf files from. This file set is - sorted, so as to provide a predictable parse order if individual options are - over-ridden. The set is parsed after the file(s), if any, specified via - --config-file, hence over-ridden options in the directory take precedence. - -FILES -~~~~~ - -* /etc/senlin/senlin.conf -* /etc/senlin/api-paste.ini -* /etc/senlin/policy.yaml - -BUGS -~~~~ - -* Senlin issues are tracked in Launchpad so you can view or report bugs here - `OpenStack Senlin Bugs `__ diff --git a/doc/source/reference/man/senlin-conductor.rst b/doc/source/reference/man/senlin-conductor.rst deleted file mode 100644 index 177ecfd8c..000000000 --- a/doc/source/reference/man/senlin-conductor.rst +++ /dev/null @@ -1,47 +0,0 @@ -================ -senlin-conductor -================ - -.. program:: senlin-conductor - -SYNOPSIS -~~~~~~~~ - -``senlin-conductor [options]`` - -DESCRIPTION -~~~~~~~~~~~ - -senlin-conductor provides an internal RPC interface for the senlin-api to -invoke. - -INVENTORY -~~~~~~~~~ - -The senlin-conductor provides an internal RPC interface. - -OPTIONS -~~~~~~~ -.. cmdoption:: --config-file - - Path to a config file to use. Multiple config files can be specified, with - values in later files taking precedence. - - -.. cmdoption:: --config-dir - - Path to a config directory to pull .conf files from. This file set is - sorted, so as to provide a predictable parse order if individual options are - over-ridden. The set is parsed after the file(s), if any, specified via - --config-file, hence over-ridden options in the directory take precedence. 
- -FILES -~~~~~ - -* /etc/senlin/senlin.conf - -BUGS -~~~~ - -* Senlin issues are tracked in Launchpad so you can view or report bugs here - `OpenStack Senlin Bugs `__ diff --git a/doc/source/reference/man/senlin-engine.rst b/doc/source/reference/man/senlin-engine.rst deleted file mode 100644 index 0482ae0fb..000000000 --- a/doc/source/reference/man/senlin-engine.rst +++ /dev/null @@ -1,48 +0,0 @@ -============= -senlin-engine -============= - -.. program:: senlin-engine - -SYNOPSIS -~~~~~~~~ - -``senlin-engine [options]`` - -DESCRIPTION -~~~~~~~~~~~ - -senlin-engine is the server that perform operations on objects such as -nodes, policies and profiles. - -INVENTORY -~~~~~~~~~ - -The senlin-engine provides services to the callers so that requests on -various objects can be met by background operations. - -OPTIONS -~~~~~~~ -.. cmdoption:: --config-file - - Path to a config file to use. Multiple config files can be specified, with - values in later files taking precedence. - - -.. cmdoption:: --config-dir - - Path to a config directory to pull .conf files from. This file set is - sorted, so as to provide a predictable parse order if individual options are - over-ridden. The set is parsed after the file(s), if any, specified via - --config-file, hence over-ridden options in the directory take precedence. - -FILES -~~~~~ - -* /etc/senlin/senlin.conf - -BUGS -~~~~ - -* Senlin issues are tracked in Launchpad so you can view or report bugs here - `OpenStack Senlin Bugs `__ diff --git a/doc/source/reference/man/senlin-health-manager.rst b/doc/source/reference/man/senlin-health-manager.rst deleted file mode 100644 index 21885f5fc..000000000 --- a/doc/source/reference/man/senlin-health-manager.rst +++ /dev/null @@ -1,48 +0,0 @@ -===================== -senlin-health-manager -===================== - -.. program:: senlin-health-manager - -SYNOPSIS -~~~~~~~~ - -``senlin-health-manager [options]`` - -DESCRIPTION -~~~~~~~~~~~ - -senlin-health-manager is the server that is responsible for cluster health -related operations. - -INVENTORY -~~~~~~~~~ - -The senlin-health-manager provides services to the callers so that various -cluster health related operations can be performed in the background. - -OPTIONS -~~~~~~~ -.. cmdoption:: --config-file - - Path to a config file to use. Multiple config files can be specified, with - values in later files taking precedence. - - -.. cmdoption:: --config-dir - - Path to a config directory to pull .conf files from. This file set is - sorted, so as to provide a predictable parse order if individual options are - over-ridden. The set is parsed after the file(s), if any, specified via - --config-file, hence over-ridden options in the directory take precedence. - -FILES -~~~~~ - -* /etc/senlin/senlin.conf - -BUGS -~~~~ - -* Senlin issues are tracked in Launchpad so you can view or report bugs here - `OpenStack Senlin Bugs `__ diff --git a/doc/source/reference/man/senlin-manage.rst b/doc/source/reference/man/senlin-manage.rst deleted file mode 100644 index bd027b37f..000000000 --- a/doc/source/reference/man/senlin-manage.rst +++ /dev/null @@ -1,98 +0,0 @@ -============= -senlin-manage -============= - -.. program:: senlin-manage - -SYNOPSIS -~~~~~~~~ - -``senlin-manage [options]`` - -DESCRIPTION -~~~~~~~~~~~ - -senlin-manage provides utilities for operators to manage Senlin specific -maintenance operations. 
-
-
-OPTIONS
-~~~~~~~
-
-To issue a senlin-manage command:
-
-``senlin-manage [options]``
-
-Run with `-h` or `--help` to see a list of available commands:
-
-``senlin-manage -h``
-
-Available commands include `db_version`, `db_sync`, `service`,
-`event_purge` and `action_purge`. Below are some detailed descriptions.
-
-
-Senlin DB version
------------------
-
-``senlin-manage db_version``
-
-Print out the db schema revision.
-
-``senlin-manage db_sync``
-
-Sync the database up to the most recent version.
-
-
-Senlin Service Manage
----------------------
-
-``senlin-manage service list``
-
-Print out the senlin-engine service status.
-
-``senlin-manage service clean``
-
-Clean up dead senlin-engine services.
-
-
-Senlin Event Manage
--------------------
-
-``senlin-manage event_purge -p [] -g {days,hours,minutes,seconds} age``
-
-Purge the specified event records in senlin's database.
-
-For example, you can use this command to purge event records that are older
-than three days:
-
-::
-
-    senlin-manage event_purge -p e127900ee5d94ff5aff30173aa607765 -g days 3
-
-
-Senlin Action Manage
---------------------
-
-``senlin-manage action_purge -p [] -g {days,hours,minutes,seconds} age``
-
-Purge the specified action records in senlin's database.
-
-You can use this command to purge actions that are older than 3 days.
-
-::
-
-    senlin-manage action_purge -p e127900ee5d94ff5aff30173aa607765 -g days 3
-
-
-FILES
-~~~~~
-
-The /etc/senlin/senlin.conf file contains global options which can be
-used to configure some aspects of `senlin-manage`, for example the DB
-connection and logging options.
-
-
-BUGS
-~~~~
-
-* Senlin issues are tracked in Launchpad so you can view or report bugs here
-  `OpenStack Senlin Bugs `__
diff --git a/doc/source/reference/man/senlin-status.rst b/doc/source/reference/man/senlin-status.rst
deleted file mode 100644
index c18d2c684..000000000
--- a/doc/source/reference/man/senlin-status.rst
+++ /dev/null
@@ -1,78 +0,0 @@
-=============
-senlin-status
-=============
-
-Synopsis
-========
-
-::
-
-    senlin-status []
-
-Description
-===========
-
-:program:`senlin-status` is a tool that provides routines for checking the
-status of a Senlin deployment.
-
-Options
-=======
-
-The standard pattern for executing a :program:`senlin-status` command is::
-
-    senlin-status []
-
-Run without arguments to see a list of available command categories::
-
-    senlin-status
-
-Categories are:
-
-* ``upgrade``
-
-Detailed descriptions are below.
-
-You can also run with a category argument such as ``upgrade`` to see a list
-of all commands in that category::
-
-    senlin-status upgrade
-
-These sections describe the available categories and arguments for
-:program:`senlin-status`.
-
-Upgrade
-~~~~~~~
-
-.. _senlin-status-checks:
-
-``senlin-status upgrade check``
-  Performs a release-specific readiness check before restarting services
-  with new code. This command expects to have complete configuration and
-  access to databases and services.
-
-  **Return Codes**
-
-  .. list-table::
-     :widths: 20 80
-     :header-rows: 1
-
-     * - Return code
-       - Description
-     * - 0
-       - All upgrade readiness checks passed successfully and there is
-         nothing to do.
-     * - 1
-       - At least one check encountered an issue and requires further
-         investigation. This is considered a warning but the upgrade may
-         be OK.
-     * - 2
-       - There was an upgrade status check failure that needs to be
-         investigated. This should be considered something that stops an
-         upgrade.
-     * - 255
-       - An unexpected error occurred.
-
-  **History of Checks**
-
-  **7.0.0 (Stein)**
-
-  * Placeholder to be filled in with checks as they are added in Stein.
diff --git a/doc/source/scenarios/affinity.rst b/doc/source/scenarios/affinity.rst
deleted file mode 100644
index 05bf09169..000000000
--- a/doc/source/scenarios/affinity.rst
+++ /dev/null
@@ -1,119 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-scenario-affinity:
-
-======================
-Managing Node Affinity
-======================
-
-When deploying multiple nodes running identical instances of the same service
-(or application) for the sake of load-balancing or high-availability, it is
-very likely you don't want all nodes deployed onto the same physical machine.
-However, when you have a cluster with some nodes playing one role (e.g.
-Application Server) and other nodes playing another role (e.g. Database),
-you may want to collocate these nodes onto the same physical machine so that
-inter-node communication can be faster.
-
-To meet these intra-cluster node collocation requirements, you have different
-choices.
-
-
-Use Server Group in Profile
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-For the purpose of managing cluster node affinity, you may choose to create
-a *server group* by invoking the nova command line, e.g.:
-
-::
-
-  $ openstack server group create sg01 --policy affinity
-  +--------------+------+------------+---------+---------------+---------+----------+
-  | Id           | Name | Project Id | User Id | Policies      | Members | Metadata |
-  +--------------+------+------------+---------+---------------+---------+----------+
-  | 54a88567-... | sg01 | ...        | ...     | [u'affinity'] | []      | {}       |
-  +--------------+------+------------+---------+---------------+---------+----------+
-
-Then when you create a nova server profile, you can input the name of the
-server group into the ``scheduler_hints`` property as shown below:
-
-::
-
-  $ cat web_cluster.yaml
-  type: os.nova.server
-  version: 1.0
-  properties:
-    name: web_server
-
-    <... other properties go here ...>
-
-    scheduler_hints:
-      group: sg01
-
-Later, when you create a cluster using this profile, the server nodes will be
-booted on the same physical host if possible. In other words, the affinity
-is managed directly by the nova compute service. If there are no physical
-hosts satisfying the constraints, node creation requests will fail.
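-
-If you have admin credentials, one way to verify the resulting placement is
-to check which compute host each server was scheduled to (a hypothetical
-check; ``web_server`` is the server name from the spec above, and the
-``OS-EXT-SRV-ATTR:host`` attribute is only visible to admin users)::
-
-  $ openstack server show web_server -c 'OS-EXT-SRV-ATTR:host'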
-
-Use Same-Host or Different-Host in Profile
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When adding nodes to an existing cluster, the new nodes can reference a
-different profile object of the same profile type (i.e. ``os.nova.server``).
-If a new node is expected to be launched on the same/different host from a
-set of server nodes, you can specify the constraint as a ``scheduler_hints``
-as well.
-
-Suppose you have two server nodes in a cluster with UUIDs "UUID1" and "UUID2"
-respectively; you can input the scheduling constraints in a profile as shown
-below:
-
-::
-
-  $ cat standalone_server.yaml
-  type: os.nova.server
-  version: 1.0
-  properties:
-    name: web_server
-
-    <... other properties go here ...>
-
-    scheduler_hints:
-      different_host:
-        - UUID1
-        - UUID2
-
-When adding a node that uses this profile into the cluster, the node creation
-either fails (e.g. no available host found) or the node is created
-successfully on a different host from the specified server nodes.
-
-Similarly, you can replace the ``different_host`` key above by ``same_host``
-to instruct that the new node be collocated with the specified existing
-node(s).
-
-
-Managing Affinity using Affinity Policy
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Another option to manage node affinity is to use the affinity policy
-(see :doc:`Affinity Policy <../user/policy_types/affinity>`). By creating and
-attaching an affinity policy to a cluster, you can still control how nodes
-are distributed relative to the underlying hosts. See the above link for usage
-of the policy.
-
-
-See Also
-~~~~~~~~
-
-* :doc:`Managing Policies <../user/policies>`
-* :doc:`Builtin Policy - Affinity Policy <../user/policy_types/affinity>`
diff --git a/doc/source/scenarios/autoscaling_ceilometer.rst b/doc/source/scenarios/autoscaling_ceilometer.rst
deleted file mode 100644
index 9fa1676f3..000000000
--- a/doc/source/scenarios/autoscaling_ceilometer.rst
+++ /dev/null
@@ -1,282 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-scenario-autoscaling-ceilometer:
-
-
-=================================
-Autoscaling using Ceilometer/Aodh
-=================================
-
-As a telemetry service, the ceilometer project consists of several sub-projects
-which provide metering, monitoring and alarming services in the telemetry
-space. This section walks you through the steps to build an auto-scaling
-solution by integrating senlin with ceilometer/aodh.
-
-Step 1: Create a VM cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The first step is to create a profile using a spec file like the following one
-and save it to a file, e.g. :file:`sample_server.yaml`:
-
-.. code-block:: yaml
-
-  type: os.nova.server
-  version: 1.0
-  properties:
-    name: cirros_server
-    flavor: m1.tiny
-    image: cirros-0.3.5-x86_64-disk
-    key_name: oskey
-    networks:
-      - network: private
-
-Note this spec file assumes that you have a working nova key-pair named
-"``oskey``" and there is a network named "``private``". You may need to change
-these values based on your environment settings. To create a profile using
-this spec:
-
-.. code-block:: console
-
-  $ openstack cluster profile create --spec-file sample_server.yaml pserver
-
-Then you can create a cluster using the profile named "``pserver``":
-
-.. code-block:: console
-
-  $ openstack cluster create --profile pserver --desired-capacity 2 mycluster
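-
-Cluster creation is asynchronous. Before showing the cluster details, you can
-optionally watch the progress by listing the actions spawned for the request
-(the ``--filters`` syntax here mirrors the action listing used later in this
-guide):
-
-.. code-block:: console
-
-  $ openstack cluster action list --filters action=CLUSTER_CREATE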
-
-You can show the cluster details using the command ``openstack cluster show
-mycluster``:
-
-.. code-block:: console
-
-  $ openstack cluster show mycluster
-  +------------------+--------------------------------------------------------------------------------+
-  | Field            | Value                                                                          |
-  +------------------+--------------------------------------------------------------------------------+
-  | config           | {}                                                                             |
-  | created_at       | 2016-08-01T02:14:38Z                                                           |
-  | data             | {}                                                                             |
-  | dependents       | {}                                                                             |
-  | desired_capacity | 2                                                                              |
-  | domain_id        | None                                                                           |
-  | id               | 09e9b90c-03e3-41e3-8a31-e9bde6707585                                           |
-  | init_at          | 2016-08-01T02:13:59Z                                                           |
-  | location         | None                                                                           |
-  | max_size         | -1                                                                             |
-  | metadata         | {}                                                                             |
-  | min_size         | 0                                                                              |
-  | name             | mycluster                                                                      |
-  | node_ids         | 78509587-fa74-49cb-984f-a2e033316a63                                           |
-  |                  | 8ccc31e6-14a3-4882-b0ef-27108cdb238d                                           |
-  | profile_id       | 8f81a3a5-e91b-4fd5-91f1-e4a04ddae20f                                           |
-  | profile_name     | pserver                                                                        |
-  | project_id       | e127900ee5d94ff5aff30173aa607765                                               |
-  | status           | ACTIVE                                                                         |
-  | status_reason    | CLUSTER_CREATE: number of active nodes is equal or above desired_capacity (2). |
-  | timeout          | 3600                                                                           |
-  | updated_at       | 2016-08-01T02:14:38Z                                                           |
-  | user_id          | 3914a2df5b7e49e3acbba86044e820ef                                               |
-  +------------------+--------------------------------------------------------------------------------+
-
-
-This creates a cluster with 2 nodes at the beginning. We export the
-cluster ID into an environment variable for convenience:
-
-.. code-block:: console
-
-  $ export MYCLUSTER_ID=10c80bfe-41af-41f7-b9b1-9c81c9e5d21f
-
-You may want to check the IP addresses assigned to each node. In the output
-from the following command, you will find the IP address for the specific node:
-
-.. code-block:: console
-
-  $ openstack cluster node show 14936837-1459-416b-a1f3-dea026f6cffc --details
-  ...
-  | details | +-----------+--------------------------------------+ |
-  |         | | property  | value                                | |
-  |         | +-----------+--------------------------------------+ |
-  |         | | addresses | {                                    | |
-  |         | |           |   "private": [                       | |
-  |         | |           |     {                                | |
-  |         | |           |       "OS-EXT-IPS-MAC:mac-addr": ... | |
-  |         | |           |       "OS-EXT-IPS:type": "fixed",    | |
-  |         | |           |       "addr": "10.0.0.9",            | |
-  |         | |           |       "version": 4                   | |
-  |         | |           |     }                                | |
-  |         | |           |   ]                                  | |
-  |         | |           | }                                    | |
-  |         | | flavor    | 1                                    | |
-  |         | | id        | 362f57b2-c089-4aab-bab3-1a7ffd4e1834 | |
-  ...
-
-We will use these IP addresses later to generate workloads on each nova
-server.
-
-Step 2: Create Receivers
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-The next step is to create receivers for the cluster for triggering actions on
-the cluster. Each receiver is usually created for a specific purpose, so for
-different purposes you may need to create more than one receiver.
-
-The following command creates a receiver for scaling out the specified cluster
-by two nodes every time it is triggered:
-
-.. code-block:: console
-
-  $ openstack cluster receiver create --action CLUSTER_SCALE_OUT --params count=2 --cluster mycluster r_01
-  +------------+---------------------------------------------------------------------------------+
-  | Field      | Value                                                                           |
-  +------------+---------------------------------------------------------------------------------+
-  | action     | CLUSTER_SCALE_OUT                                                               |
-  | actor      | {                                                                               |
-  |            |   "trust_id": "432f81d339444cac959bab2fd9ba92fa"                                |
-  |            | }                                                                               |
-  | channel    | {                                                                               |
-  |            |   "alarm_url": "http://node1:8777/v1/webhooks/ba...5a/trigger?V=2&count=2       |
-  |            | }                                                                               |
-  | cluster_id | b75d25e7-e84d-4742-abf7-d8a3001e25a9                                            |
-  | created_at | 2016-08-01T02:17:14Z                                                            |
-  | domain_id  | None                                                                            |
-  | id         | ba13f7cd-7a95-4545-b646-6a833ba6505a                                            |
-  | location   | None                                                                            |
-  | name       | r_01                                                                            |
-  | params     | {                                                                               |
-  |            |   "count": "2"                                                                  |
-  |            | }                                                                               |
-  | project_id | 99185bcde62c478e8d05b702e52d8b8d                                                |
-  | type       | webhook                                                                         |
-  | updated_at | None                                                                            |
-  | user_id    | 6c369aec78b74a4da413f86dadb0255e                                                |
-  +------------+---------------------------------------------------------------------------------+
-
-At present, all property values shown for a receiver are read-only. You cannot
-change their values once the receiver is created. The only type of receiver
-senlin understands is "``webhook``". For the "``action``" parameter, there are
-many choices:
-
-- ``CLUSTER_SCALE_OUT``
-- ``CLUSTER_SCALE_IN``
-- ``CLUSTER_RESIZE``
-- ``CLUSTER_CHECK``
-- ``CLUSTER_UPDATE``
-- ``CLUSTER_DELETE``
-- ``CLUSTER_ADD_NODES``
-- ``CLUSTER_DEL_NODES``
-- ``NODE_CREATE``
-- ``NODE_DELETE``
-- ``NODE_UPDATE``
-- ``NODE_CHECK``
-- ``NODE_RECOVER``
-
-Senlin may add support for more action types in the future.
-
-After a receiver is created, you can check its "``channel``" property value to
-find out how to trigger that receiver. For a receiver of type "``webhook``"
-(the default and the only supported type as of now), this means you will
-check the "``alarm_url``" value. We will use that value later for action
-triggering. For convenience, we export that value to an environment variable:
-
-.. code-block:: console
-
-  $ export ALRM_URL01="http://node1:8777/v1/webhooks/ba...5a/trigger?V=2&count=2"
-
-Similar to the example above, you can create other receivers for different
-kinds of cluster operations or the same cluster operation with different
-parameter values.
-
-Step 3: Creating Aodh Alarms
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Once we have the cluster created and prepared to receive external signals, we
-can proceed to create alarms using the software/service you deployed. The
-following command creates a threshold alarm using the aodh alarm service so
-that:
-
-- aodh will evaluate the CPU utilization (i.e. ``cpu_util``) metric across the
-  specified cluster;
-- aodh will compute the CPU utilization using the average value during a given
-  period (i.e. 60 seconds here);
-- aodh will perform evaluation at the end of every single period;
-- aodh won't trigger alarm actions repeatedly;
-- aodh will do metric aggregation based on the specified metadata.
-
-.. code-block:: console
-
-  $ aodh alarm create \
-    --type gnocchi_resources_threshold --name cpu-high \
-    --metric cpu_util --threshold 70 --comparison-operator gt \
-    --description 'instance running hot' --evaluation-periods 1 \
-    --aggregation-method mean --alarm-action $ALRM_URL01 \
-    --granularity 600 --repeat-actions False \
-    --query metadata.user_metadata.cluster_id=$MYCLUSTER_ID
-
-Note that we are referencing the two environment variables ``MYCLUSTER_ID``
-and ``ALRM_URL01`` in this command.
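-
-To confirm the alarm was created and is being evaluated, you can list and
-inspect it with the aodh client (a quick sanity check, not a required step;
-``<alarm_id>`` is a placeholder for an ID taken from the listing):
-
-.. code-block:: console
-
-  $ aodh alarm list
-  $ aodh alarm show <alarm_id>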
-
-.. note::
-  To make aodh aware of the ``cluster_id`` metadata senlin injects into each
-  and every VM server created, you may need to add the following line into
-  your :file:`/etc/ceilometer/ceilometer.conf` file::
-
-    reserved_metadata_keys = cluster_id
-
-  Also note that to make sure your CPU-utilization-driven metrics are
-  evaluated at least once per 60 seconds, you will need to change the
-  ``interval`` value for the ``cpu_source`` in the file
-  :file:`/etc/ceilometer/pipeline.yaml`. For example, you can change it from
-  the default value ``600`` to ``60``::
-
-    sources:
-      - name: cpu_source
-        interval: 600  <- change this to 60
-        meters:
-          - "cpu"
-
-
-Step 4: Run Workloads on Cluster Nodes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To examine the effect of cluster scaling under high CPU workload, you can now
-log into the cluster nodes and run some CPU-burning workloads there to drive
-the CPU utilization high. For example:
-
-.. code-block:: console
-
-  $ ssh cirros@10.0.0.9
-  $ cat /dev/zero > /dev/null
-  < Guest system "hang" here... >
-
-When all nodes in the cluster have their CPU pressure boosted, you can check
-the CPU utilization on each node and finally proceed to the next step.
-
-Step 5: Verify Cluster Scaling
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A while after the CPU workloads on the cluster nodes are started, you will
-notice that the cluster has been automatically scaled. Two new nodes are
-created and added to the cluster. This can be verified by running the
-following command:
-
-.. code-block:: console
-
-  $ openstack cluster show $MYCLUSTER_ID
-
-Optionally, you can use the following command to check if the anticipated
-action was triggered and executed:
-
-.. code-block:: console
-
-  $ openstack cluster action list --filters target=$MYCLUSTER_ID
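-
-If you want to dig into any one of the actions listed, you can display its
-details too (``<action_id>`` is a placeholder for an ID taken from the
-listing above):
-
-.. code-block:: console
-
-  $ openstack cluster action show <action_id>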
diff --git a/doc/source/scenarios/autoscaling_heat.rst b/doc/source/scenarios/autoscaling_heat.rst
deleted file mode 100644
index 05099675b..000000000
--- a/doc/source/scenarios/autoscaling_heat.rst
+++ /dev/null
@@ -1,251 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _guide-tutorial-autoscaling-heat:
-
-=====================
-Autoscaling with Heat
-=====================
-
-Goal
-~~~~
-
-There are Senlin resource types in Heat which make deployment of a full-featured
-auto-scaling solution easily attainable. This document provides a tutorial for
-users who want to use heat to create a senlin cluster.
-
-Real deployments often require the cluster to be load-balanced and auto-scaled.
-We also want the scaling actions to be triggered based on business data
-instead of infrastructure metrics. When the existing cluster cannot sustain the
-throughput/workload, the cluster will be scaled out; when the throughput or
-workload is low, the cluster will be scaled in.
-
-Moreover, customization is easy when auto-scaling. Receivers can be created to
-generate webhooks from scale_out and scale_in actions. In addition,
-placement_zone.yaml and placement_region.yaml can be attached to the cluster to
-guide which zone/region new nodes are placed in when scaling out; a
-deletion_policy can be attached to the cluster to guide the choice of
-candidates to delete when scaling in.
-
-Sample template
-~~~~~~~~~~~~~~~
-
-There is a sample template in the heat-templates project, under the senlin
-directory, for creating a Senlin elastic load-balanced cluster with Heat. Here
-we pick some important parts of the sample and explain them one by one.
-
-The resource below defines a security_group for connecting to the created
-load-balanced cluster:
-
-.. code-block:: yaml
-
-  security_group:
-    type: OS::Neutron::SecurityGroup
-    properties:
-      rules:
-        - protocol: icmp
-        - protocol: tcp
-          port_range_min: 22
-          port_range_max: 22
-        - protocol: tcp
-          port_range_min: 80
-          port_range_max: 80
-
-The resource below defines the profile used to create the targeted cluster:
-
-.. code-block:: yaml
-
-  profile:
-    type: OS::Senlin::Profile
-    properties:
-      type: os.nova.server-1.0
-      properties:
-        flavor: {get_param: flavor}
-        image: {get_param: image}
-        key_name: {get_param: key_name}
-        networks:
-          - network: {get_param: network}
-        security_groups:
-          - {get_resource: security_group}
-
-The resource below creates a Senlin cluster with at least two nodes:
-
-.. code-block:: yaml
-
-  cluster:
-    type: OS::Senlin::Cluster
-    properties:
-      desired_capacity: 2
-      min_size: 2
-      profile: {get_resource: profile}
-
-The two resources below define the scale_in_policy and scale_out_policy
-attached to the created cluster. The ``event`` property defines which action
-the policy applies to. When the ``type`` of the ``adjustment`` property is set
-to CHANGE_IN_CAPACITY, the cluster will increase the number of nodes on
-scale-out and decrease the number of nodes on scale-in:
-
-.. code-block:: yaml
-
-  scale_in_policy:
-    type: OS::Senlin::Policy
-    properties:
-      type: senlin.policy.scaling-1.0
-      bindings:
-        - cluster: {get_resource: cluster}
-      properties:
-        event: CLUSTER_SCALE_IN
-        adjustment:
-          type: CHANGE_IN_CAPACITY
-          number: 1
-
-  scale_out_policy:
-    type: OS::Senlin::Policy
-    properties:
-      type: senlin.policy.scaling-1.0
-      bindings:
-        - cluster: {get_resource: cluster}
-      properties:
-        event: CLUSTER_SCALE_OUT
-        adjustment:
-          type: CHANGE_IN_CAPACITY
-          number: 1
-
-The resource below defines a lb_policy to be attached to the target cluster.
-Once the policy is attached to the cluster, Senlin will automatically create
-the load balancer, pool, and health_monitor by invoking the neutron LBaaS V2
-APIs for load-balancing purposes:
-
-.. code-block:: yaml
-
-  lb_policy:
-    type: OS::Senlin::Policy
-    properties:
-      type: senlin.policy.loadbalance-1.0
-      bindings:
-        - cluster: {get_resource: cluster}
-      properties:
-        pool:
-          protocol: HTTP
-          protocol_port: 80
-          subnet: {get_param: pool_subnet}
-          lb_method: ROUND_ROBIN
-        vip:
-          subnet: {get_param: vip_subnet}
-          protocol: HTTP
-          protocol_port: 80
-        health_monitor:
-          type: HTTP
-          delay: 10
-          timeout: 5
-          max_retries: 4
-
-The two resources below define the receivers to be triggered when a certain
-alarm or event occurs:
-
-.. code-block:: yaml
-
-  receiver_scale_out:
-    type: OS::Senlin::Receiver
-    properties:
-      cluster: {get_resource: cluster}
-      action: CLUSTER_SCALE_OUT
-      type: webhook
-
-  receiver_scale_in:
-    type: OS::Senlin::Receiver
-    properties:
-      cluster: {get_resource: cluster}
-      action: CLUSTER_SCALE_IN
-      type: webhook
-
-The resource below defines the policy for selecting candidate nodes for
-deletion when the cluster is to be shrunk:
-
-.. code-block:: yaml
-
-  deletion_policy:
-    type: OS::Senlin::Policy
-    properties:
-      type: senlin.policy.deletion-1.0
-      bindings:
-        - cluster: {get_resource: cluster}
-      properties:
-        criteria: YOUNGEST_FIRST
-        destroy_after_deletion: True
-        grace_period: 20
-        reduce_desired_capacity: False
-
-The two resources below define the alarms to trigger the above two receivers
-respectively. We use the average rate of incoming bytes at the load balancer
-as the metric to trigger the scaling operations:
-
-.. code-block:: yaml
-
-  scale_in_alarm:
-    type: OS::Ceilometer::Alarm
-    properties:
-      description: trigger when bandwidth overflow
-      meter_name: network.services.lb.incoming.bytes.rate
-      statistic: avg
-      period: 180
-      evaluation_periods: 1
-      threshold: 12000
-      repeat_actions: True
-      alarm_actions:
-        - {get_attr: [receiver_scale_in, channel, alarm_url]}
-      comparison_operator: le
-      query:
-        metadata.user_metadata.cluster_id: {get_resource: cluster}
-
-  scale_out_alarm:
-    type: OS::Ceilometer::Alarm
-    properties:
-      description: trigger when bandwidth insufficient
-      meter_name: network.services.lb.incoming.bytes.rate
-      statistic: avg
-      period: 60
-      evaluation_periods: 1
-      threshold: 28000
-      repeat_actions: True
-      alarm_actions:
-        - {get_attr: [receiver_scale_out, channel, alarm_url]}
-      comparison_operator: ge
-      query:
-        metadata.user_metadata.cluster_id: {get_resource: cluster}
-
-Deployment Steps
-~~~~~~~~~~~~~~~~
-
-Before the deployment, please ensure that neutron LBaaS v2 and
-ceilometer/Aodh have been installed and configured in your environment.
-
-Step one is to generate a key pair using the following command:
-
-.. code-block:: console
-
-  $ openstack keypair create heat_key
-
-Step two is to create a heat template by downloading the template file
-from `heat template`_.
-
-Step three is to create a heat stack using the following command:
-
-.. code-block:: console
-
-  $ openstack stack create test -t ./ex_aslb.yaml --parameter "key_name=heat_key"
-
-The steps and samples introduced in this tutorial can also work
-well together with the combination of ceilometer, Aodh, and Gnocchi
-without any change.
-
-.. _heat template: https://opendev.org/openstack/senlin/src/branch/master/doc/source/scenarios/ex_lbas.yaml
diff --git a/doc/source/scenarios/autoscaling_overview.rst b/doc/source/scenarios/autoscaling_overview.rst
deleted file mode 100644
index 516f5177d..000000000
--- a/doc/source/scenarios/autoscaling_overview.rst
+++ /dev/null
@@ -1,55 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. 
_scenario-affinity: - -====================== -Support to AutoScaling -====================== - -The senlin service provides a rich set of facilities for building an -auto-scaling solution: - -- *Operations*: The ``CLUSTER_SCALE_OUT``, ``CLUSTER_SCALE_IN`` operations are - the simplest form of commands to scale a cluster. The ``CLUSTER_RESIZE`` - operation, on the other hand, provides more options for controlling the - detailed cluster scaling behavior. These operations can be performed with - and without policies attached to a cluster. - -- *Policies*: - - The ``senlin.policy.scaling`` (:doc:`link <../user/policy_types/scaling>`) - policy can be applied to fine tune the cluster scaling operations. - - The ``senlin.policy.deletion`` (:doc:`link <../user/policy_types/deletion>`) - policy can be attached to a cluster to control how nodes are removed from a - cluster. - - The ``senlin.policy.affinity`` (:doc:`link <../user/policy_types/affinity>`) - policy can be used to control how node affinity or anti-affinity can be - enforced. - - The ``senlin.policy.region_placement`` - (:doc:`link <../user/policy_types/region_placement>`) can be applied to - scale a cluster across multiple regions. - - The ``senlin.policy.zone_placement`` - (:doc:`link <../user/policy_types/zone_placement>`) can be enforced to - achieve a cross-availability-zone node distribution. - -- *Receivers*: The receiver (:doc:`link <../user/receivers>`) concept provides a - channel to which you can send signals or alarms from an external monitoring - software or service so that scaling operations can be automated. - -This section provides some guides on integrating senlin with other services -so that cluster scaling can be automated. diff --git a/doc/source/scenarios/ex_lbas.yaml b/doc/source/scenarios/ex_lbas.yaml deleted file mode 100644 index f8ca6dd5f..000000000 --- a/doc/source/scenarios/ex_lbas.yaml +++ /dev/null @@ -1,167 +0,0 @@ -heat_template_version: 2016-04-08 - -description: > - This template demonstrate how to create a cluster and attach a - loadbalance policy, a scale-out policy and scale-in policy to it. - -parameters: - flavor: - description: Flavor for the instances to be created. - type: string - default: m1.nano - image: - description: Name or ID of the image to use for the instances. - type: string - default: cirros-0.3.5-x86_64-disk - key_name: - description: Name of an existing key pair to use for the instances. - type: string - network: - description: The network for the instances. - type: string - default: private - pool_subnet: - description: Subnet for the port on which members can be connected. - type: string - default: private-subnet - vip_subnet: - description: Subnet on which VIP address will be allocated. 
- type: string - default: private-subnet - -resources: - security_group: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - profile: - type: OS::Senlin::Profile - properties: - type: os.nova.server-1.0 - properties: - flavor: {get_param: flavor} - image: {get_param: image} - key_name: {get_param: key_name} - networks: - - network: {get_param: network} - security_groups: - - {get_resource: security_group} - - cluster: - type: OS::Senlin::Cluster - properties: - desired_capacity: 2 - min_size: 2 - profile: {get_resource: profile} - - scale_in_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.scaling-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - event: CLUSTER_SCALE_IN - adjustment: - type: CHANGE_IN_CAPACITY - number: 1 - - scale_out_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.scaling-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - event: CLUSTER_SCALE_OUT - adjustment: - type: CHANGE_IN_CAPACITY - number: 1 - - lb_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.loadbalance-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - pool: - protocol: HTTP - protocol_port: 80 - subnet: {get_param: pool_subnet} - lb_method: ROUND_ROBIN - vip: - subnet: {get_param: vip_subnet} - protocol: HTTP - protocol_port: 80 - health_monitor: - type: HTTP - delay: 10 - timeout: 5 - max_retries: 4 - - receiver_scale_out: - type: OS::Senlin::Receiver - properties: - cluster: {get_resource: cluster} - action: CLUSTER_SCALE_OUT - type: webhook - - receiver_scale_in: - type: OS::Senlin::Receiver - properties: - cluster: {get_resource: cluster} - action: CLUSTER_SCALE_IN - type: webhook - - deletion_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.deletion-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - criteria: YOUNGEST_FIRST - destroy_after_deletion: True - grace_period: 20 - reduce_desired_capacity: False - - scale_in_alarm: - type: OS::Ceilometer::Alarm - properties: - description: trigger when bandwidth overflow - meter_name: network.services.lb.incoming.bytes.rate - statistic: avg - period: 180 - evaluation_periods: 1 - threshold: 12000 - repeat_actions: True - alarm_actions: - - {get_attr: [receiver_scale_in, channel, alarm_url]} - comparison_operator: le - query: - metadata.user_metadata.cluster_id: {get_resource: cluster} - - scale_out_alarm: - type: OS::Ceilometer::Alarm - properties: - description: trigger when bandwidth insufficient - meter_name: network.services.lb.incoming.bytes.rate - statistic: avg - period: 60 - evaluation_periods: 1 - threshold: 28000 - repeat_actions: True - alarm_actions: - - {get_attr: [receiver_scale_out, channel, alarm_url]} - comparison_operator: ge - query: - metadata.user_metadata.cluster_id: {get_resource: cluster} diff --git a/doc/source/tutorial/autoscaling.rst b/doc/source/tutorial/autoscaling.rst deleted file mode 100644 index cfeff9b17..000000000 --- a/doc/source/tutorial/autoscaling.rst +++ /dev/null @@ -1,172 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _tutorial-autoscaling:
-
-===========================
-Making Your Cluster Elastic
-===========================
-
-Creating Receivers
-~~~~~~~~~~~~~~~~~~
-
-Suppose you want a cluster to scale out by one node each time an event occurs;
-you can create a receiver for this task:
-
-.. code-block:: console
-
-  $ openstack cluster receiver create --type webhook --cluster mycluster \
-      --action CLUSTER_SCALE_OUT so_receiver_1
-  +------------+---------------------------------------------------+
-  | Field      | Value                                             |
-  +------------+---------------------------------------------------+
-  | action     | CLUSTER_SCALE_OUT                                 |
-  | actor      | {                                                 |
-  |            |   "trust_id": "b2b8fd71c3d54f67ac14e5851c0117b8"  |
-  |            | }                                                 |
-  | channel    | {                                                 |
-  |            |   "alarm_url": ""                                 |
-  |            | }                                                 |
-  | cluster_id | 30d7ef94-114f-4163-9120-412b78ba38bb              |
-  | created_at | 2017-02-08T02:08:13Z                              |
-  | domain_id  | None                                              |
-  | id         | 5722a2b0-1f5f-4a82-9c08-27da9982d46f              |
-  | location   | None                                              |
-  | name       | so_receiver_1                                     |
-  | params     | {}                                                |
-  | project_id | 36d551c0594b4cc99d1bbff8bf202ec3                  |
-  | type       | webhook                                           |
-  | updated_at | None                                              |
-  | user_id    | 9563fa29642a4efdb1033bf8aab07daa                  |
-  +------------+---------------------------------------------------+
-
-
-The command above creates a receiver named ``so_receiver_1`` which can be used
-to initiate a ``CLUSTER_SCALE_OUT`` action on the cluster ``mycluster``. From
-the output of this command, you will find an ``alarm_url`` value from the
-``channel`` property. This will be the URL for you to trigger the scaling
-operation.
-
-.. note::
-
-  You are expected to treat the ``alarm_url`` value as a secret. Any person or
-  software which knows this value will be able to trigger the scaling operation
-  on your cluster. This may not be what you want.
-
-The default receiver type is "``webhook``". You may choose to create
-a "``message``" type of receiver if you have the zaqar messaging service
-installed. For more details, please refer to :ref:`ref-receivers`.
-
-Triggering Scaling
-~~~~~~~~~~~~~~~~~~
-
-Once you have received a channel from the created receiver, you can use it to
-trigger the associated action on the specified cluster. The simplest way to
-do this is to use the :command:`curl` command as shown below:
-
-.. code-block:: console
-
-  $ curl -X POST
-
-Once the above request is received by the senlin-api, your cluster will be
-scaled out by one node. In other words, a new node is created and added to the
-cluster.
-
-
-Creating Scaling Policies
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Senlin provides some builtin policy types to control how a cluster will be
-scaled when a relevant request is received. A scaling request can be a simple
-``CLUSTER_SCALE_OUT`` or ``CLUSTER_SCALE_IN`` action which can accept an
-optional ``count`` argument; it can be a more complex ``CLUSTER_RESIZE``
-action which can accept more arguments for fine-tuning the scaling behavior.
-
-In the absence of such arguments (which is not uncommon if you are using
-3rd-party monitoring software which doesn't have the intelligence to decide
-each and every argument), you can always use scaling policies for this
-purpose.
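-
-For comparison, here is a sketch of carrying the scaling decision entirely in
-the request arguments instead, using the resize operation (the capacity value
-below is arbitrary):
-
-.. code-block:: console
-
-  $ openstack cluster resize --capacity 4 mycluster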
-
-Below is a sample YAML file (:file:`examples/policies/scaling_policy.yaml`)
-used for creating a scaling policy object::
-
-  type: senlin.policy.scaling
-  version: 1.0
-  properties:
-    event: CLUSTER_SCALE_IN
-    adjustment:
-      type: CHANGE_IN_CAPACITY
-      number: 2
-      min_step: 1
-      best_effort: True
-      cooldown: 120
-
-To create a policy object, you can use the following command:
-
-.. code-block:: console
-
-  $ openstack cluster policy create \
-      --spec-file examples/policies/scaling_policy.yaml \
-      policy1
-  +------------+--------------------------------------+
-  | Field      | Value                                |
-  +------------+--------------------------------------+
-  | created_at | 2016-12-08T02:41:30.000000           |
-  | data       | {}                                   |
-  | domain_id  | None                                 |
-  | id         | 3ca962c5-68ce-4293-9087-c73964546223 |
-  | location   | None                                 |
-  | name       | policy1                              |
-  | project_id | 36d551c0594b4cc99d1bbff8bf202ec3     |
-  | spec       | {                                    |
-  |            |   "version": 1.0,                    |
-  |            |   "type": "senlin.policy.scaling",   |
-  |            |   "properties": {                    |
-  |            |     "adjustment": {                  |
-  |            |       "min_step": 1,                 |
-  |            |       "cooldown": 120,               |
-  |            |       "best_effort": true,           |
-  |            |       "number": 1,                   |
-  |            |       "type": "CHANGE_IN_CAPACITY"   |
-  |            |     },                               |
-  |            |     "event": "CLUSTER_SCALE_IN"      |
-  |            |   }                                  |
-  |            | }                                    |
-  | type       | senlin.policy.scaling-1.0            |
-  | updated_at | None                                 |
-  | user_id    | 9563fa29642a4efdb1033bf8aab07daa     |
-  +------------+--------------------------------------+
-
-The next step to enforce this policy on your cluster is to attach the policy
-to it, as shown below:
-
-.. code-block:: console
-
-  $ openstack cluster policy attach --policy policy1 mycluster
-  Request accepted by action: 89626141-0999-4e76-9795-a86c4cfd531f
-
-  $ openstack cluster policy binding list mycluster
-  +-----------+-------------+---------------------------+------------+
-  | policy_id | policy_name | policy_type               | is_enabled |
-  +-----------+-------------+---------------------------+------------+
-  | 3ca962c5  | policy1     | senlin.policy.scaling-1.0 | True       |
-  +-----------+-------------+---------------------------+------------+
-
-In the future, when your cluster is about to be scaled in (no matter whether
-the request comes from a user, a piece of software, or a receiver), the
-attached scaling policy will help determine 1) how many nodes are to be
-removed, 2) whether the scaling operation should be done on a best-effort
-basis, 3) for how long the cluster will not respond to further scaling
-requests, etc.
-
-For more information on using scaling policy, you can refer to
-:ref:`ref-scaling-policy`.
diff --git a/doc/source/tutorial/basics.rst b/doc/source/tutorial/basics.rst
deleted file mode 100644
index b60882cb4..000000000
--- a/doc/source/tutorial/basics.rst
+++ /dev/null
@@ -1,179 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _tutorial-basic:
-
-=============
-Senlin Basics
-=============
-
-.. note::
-
-  This tutorial assumes that you are working on the master branch of the
-  senlin source code which contains the latest profile samples and policy
-  samples. To clone the latest code base:
-
-  .. 
code-block:: console - - $ git clone https://git.openstack.org/openstack/senlin.git - -Follow the `Installation Guide`_ to install the senlin service. - - -Creating Your First Profile -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A profile captures the necessary elements you need to create a node. The -following is a profile specification (``spec`` for short) that can be used -to create a nova server: - -.. literalinclude:: ../../../examples/profiles/nova_server/cirros_basic.yaml - :language: yaml - -.. note:: - The above source file can be found in senlin source tree at - ``/examples/profiles/nova_server/cirros_basic.yaml``. - -The **spec** assumes that: - -- you have a nova keypair named ``oskey``, and -- you have a neutron network named ``private``, and -- there is a glance image named ``cirros-0.3.5-x86_64-disk`` - -You may have to change the values based on your environment setup before using -this file to create a profile. After the **spec** file is modified properly, -you can use the following command to create a profile object: - -.. code-block:: console - - $ cd $SENLIN_ROOT/examples/profiles/nova_server - $ openstack cluster profile create --spec-file cirros_basic.yaml myserver - -Check the :doc:`Profiles <../user/profiles>` section in the -:ref:`user-references` documentation for more details. - -Creating Your First Cluster -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -With a profile created, we can proceed to create a cluster by specifying the -profile and a cluster name. - -.. code-block:: console - - $ openstack cluster create --profile myserver mycluster - -If you don't explicitly specify a number as the desired capacity of the -cluster, senlin won't create nodes in the cluster. That means the newly -created cluster is empty. If you do provide a number as the desired capacity -for the cluster as shown below, senlin will create the specified number of -nodes in the cluster. - -.. code-block:: console - - $ openstack cluster create --profile myserver --desired-capacity 1 mycluster - $ openstack cluster show mycluster - -For more details, check the :doc:`Creating a Cluster <../user/clusters>` -section in the :ref:`user-references` documentation. - - -Scaling a Cluster -~~~~~~~~~~~~~~~~~ - -Now you can try to change the size of your cluster. To increase the size, -use the following command: - -.. code-block:: console - - $ openstack cluster expand mycluster - $ openstack cluster show mycluster - -To decrease the size of the cluster, use the following command: - -.. code-block:: console - - $ openstack cluster shrink mycluster - $ openstack cluster show mycluster - -For more details, please check the :doc:`Resizing a Cluster <../user/clusters>` -section in the :ref:`user-references` section. - - -Resizing a Cluster -~~~~~~~~~~~~~~~~~~ - -Yet another way to change the size of a cluster is to use the command -``cluster-resize``: - -.. code-block:: console - - $ openstack cluster resize --capacity 2 mycluster - $ openstack cluster show mycluster - -The ``cluster-resize`` command supports more flexible options to control how -a cluster is resized. For more details, please check the -:doc:`Resizing a Cluster <../user/clusters>` section in the -:ref:`user-references` section. - - -Creating a Node ---------------- - -Another way to manage cluster node membership is to create a standalone node -then add it to a cluster. To create a node using a given profile: - -.. 
-
-.. code-block:: console
-
-  $ openstack cluster node create --profile myserver newnode
-  $ openstack cluster node show newnode
-
-For other options supported by the ``node-create`` command, please check the
-:doc:`Creating a Node <../user/nodes>` subsection in the
-:ref:`user-references` documentation.
-
-
-Adding a Node to a Cluster
---------------------------
-
-If a node has the same profile type as that of a cluster, you can add the node
-to the cluster using the ``cluster-node-add`` command:
-
-.. code-block:: console
-
-  $ openstack cluster members add --nodes newnode mycluster
-  $ openstack cluster members list mycluster
-  $ openstack cluster show mycluster
-  $ openstack cluster node show newnode
-
-After the operation is completed, you will see that the node becomes a member
-of the target cluster, with an index value assigned.
-
-Removing a Node from a Cluster
-------------------------------
-
-You can also remove a node from a cluster using the ``cluster-node-del``
-command:
-
-.. code-block:: console
-
-  $ openstack cluster members del --nodes newnode mycluster
-  $ openstack cluster members list mycluster
-  $ openstack cluster show mycluster
-  $ openstack cluster node show newnode
-
-For other cluster membership management commands and options, please check the
-:doc:`Cluster Membership <../user/membership>` section in the
-:ref:`user-references` documentation.
-
-
-.. _Installation Guide: https://docs.openstack.org/senlin/latest/install
diff --git a/doc/source/tutorial/policies.rst b/doc/source/tutorial/policies.rst
deleted file mode 100644
index 5dd961418..000000000
--- a/doc/source/tutorial/policies.rst
+++ /dev/null
@@ -1,91 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _tutorial-policies:
-
-=====================
-Working with Policies
-=====================
-
-Creating a Policy
-~~~~~~~~~~~~~~~~~
-
-A policy contains the set of rules that are checked/enforced before or
-after certain cluster operations are performed. The detailed specification
-of a specific policy type is provided as the ``spec`` of a policy object
-when it is created. The following is a sample ``spec`` for a deletion policy:
-
-.. literalinclude:: ../../../examples/policies/deletion_policy.yaml
-  :language: yaml
-
-.. note::
-  The above source file can be found in senlin source tree at
-  ``/examples/policies/deletion_policy.yaml``.
-
-To create a policy object using this specification (``spec`` for short):
-
-.. code-block:: console
-
-  $ cd $SENLIN_ROOT/examples/policies
-  $ openstack cluster policy create --spec-file deletion_policy.yaml dp01
-
-To verify the policy creation, you can do:
-
-.. code-block:: console
-
-  $ openstack cluster policy list
-  $ openstack cluster policy show dp01
-
-Attaching a Policy
-~~~~~~~~~~~~~~~~~~
-
-To enforce a policy on a cluster, attach the policy to it:
-
-.. code-block:: console
-
-  $ openstack cluster policy attach --policy dp01 mycluster
-
-To verify the policy attach operation, do the following:
-
-.. code-block:: console
-
-  $ openstack cluster policy binding list mycluster
-  $ openstack cluster policy binding show --policy dp01 mycluster
-
-Verifying a Policy
-~~~~~~~~~~~~~~~~~~
-
-To verify the deletion policy attached to the cluster ``mycluster``, you
-can try expanding the cluster, followed by shrinking it:
-
-.. code-block:: console
-
-  $ openstack cluster members list mycluster
-  $ openstack cluster expand mycluster
-  $ openstack cluster members list mycluster
-  $ openstack cluster shrink mycluster
-  $ openstack cluster members list mycluster
-
-After the scale-in operation is completed, you will find that the oldest
-node from the cluster is removed. If you want to remove the youngest node
-instead, you can create a different deletion policy with a different
-specification.
-
-For more details about policy types and policy management, check the
-:doc:`Policy Types <../user/policy_types>` section and the
-:doc:`Policies <../user/policies>` section in the
-:ref:`user-references` documentation respectively.
-You may also want to check the
-:doc:`Cluster-Policy Bindings <../user/bindings>` section in the
-:ref:`user-references` section for more details on managing the cluster-policy
-relationship.
diff --git a/doc/source/tutorial/receivers.rst b/doc/source/tutorial/receivers.rst
deleted file mode 100644
index 5db2050eb..000000000
--- a/doc/source/tutorial/receivers.rst
+++ /dev/null
@@ -1,88 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _tutorial-receivers:
-
-======================
-Working with Receivers
-======================
-
-Receivers are the event sinks associated with senlin clusters. When
-certain events (or alarms) are seen by monitoring software, the software can
-notify the senlin clusters of those events (or alarms). When senlin receives
-those notifications, it can automatically trigger some predefined operations
-with preset parameter values.
-
-Creating a Receiver
-~~~~~~~~~~~~~~~~~~~
-
-To create a receiver, you need to specify the target cluster and the target
-action to be triggered in the future. For example, the following command
-creates a receiver that will trigger the ``CLUSTER_SCALE_IN`` operation on the
-target cluster:
-
-.. code-block:: console
-
-  $ openstack cluster receiver create --cluster mycluster --action CLUSTER_SCALE_IN w_scale_in
-
-The output from the command will be something like this:
-
-.. code-block:: console
-
-  $ openstack cluster receiver create --cluster mycluster --action CLUSTER_SCALE_IN w_scale_in
-  +------------+-------------------------------------------------------------------------+
-  | Field      | Value                                                                   |
-  +------------+-------------------------------------------------------------------------+
-  | action     | CLUSTER_SCALE_IN                                                        |
-  | actor      | {                                                                       |
-  |            |   "trust_id": "1bc958f5780b4ad38fb6583701a9f39b"                        |
-  |            | }                                                                       |
-  | channel    | {                                                                       |
-  |            |   "alarm_url": "http://node1:8777/v1/webhooks/5dacde18-.../trigger?V=2" |
-  |            | }                                                                       |
-  | cluster_id | 7fb3d988-3bc1-4539-bd5d-3f72e8d6e0c7                                    |
-  | created_at | 2016-05-23T01:36:39                                                     |
-  | domain_id  | None                                                                    |
-  | id         | 5dacde18-661e-4db4-b7a8-f2a6e3466f98                                    |
-  | location   | None                                                                    |
-  | name       | w_scale_in                                                              |
-  | params     | None                                                                    |
-  | project_id | eee0b7c083e84501bdd50fb269d2a10e                                        |
-  | type       | webhook                                                                 |
-  | updated_at | None                                                                    |
-  | user_id    | ab79b9647d074e46ac223a8fa297b846                                        |
-  +------------+-------------------------------------------------------------------------+
-
-From the output of the ``openstack cluster receiver create`` command,
-you can see:
-
-- There is a ``type`` property whose value is set to ``webhook`` by default,
-  which is one of the receiver types senlin supports.
-- There is a ``channel`` property which contains an ``alarm_url`` key. The
-  value of the ``alarm_url`` is the endpoint for you to post a request to.
-
-Triggering a Receiver with CURL
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Once you have a receiver created, you can test it by triggering the specified
-action using tools like ``curl``.
-
-.. code-block:: console
-
-  $ curl -X POST http://node1:8777/v1/webhooks/5dacde18-661e-4db4-b7a8-f2a6e3466f98/trigger?V=2
-
-After a while, you can check that the cluster has been shrunk by 1 node.
-
-For more details about managing receivers, please check the
-:doc:`Receivers <../user/receivers>` section in the
-:ref:`user-references` documentation.
diff --git a/doc/source/user/actions.rst b/doc/source/user/actions.rst
deleted file mode 100644
index 1bde34742..000000000
--- a/doc/source/user/actions.rst
+++ /dev/null
@@ -1,184 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-actions:
-
-
-=======
-Actions
-=======
-
-Concept
-~~~~~~~
-
-An :term:`Action` is an operation that can be performed on a :term:`Cluster`
-or a :term:`Node`. Each action is executed asynchronously by a worker thread
-after being created. Most Senlin APIs are executed asynchronously inside the
-Senlin engine except for some object retrieval or object listing APIs.
-
-Different types of objects support different sets of actions. 
For example, a
-cluster object supports the following actions:
-
-* ``CREATE``: creates a cluster;
-* ``DELETE``: deletes a cluster;
-* ``UPDATE``: updates the properties and/or the profile used by a cluster;
-* ``ADD_NODES``: adds existing nodes to a cluster;
-* ``DEL_NODES``: removes nodes from a cluster;
-* ``ATTACH_POLICY``: attaches the specified policy to a cluster;
-* ``DETACH_POLICY``: detaches the specified policy from a cluster;
-* ``UPDATE_POLICY``: updates the specified policy on a cluster;
-* ``SCALE_IN``: shrinks the size of a cluster;
-* ``SCALE_OUT``: inflates the size of a cluster;
-* ``RESIZE``: resizes a cluster;
-* ``CHECK``: checks a cluster;
-* ``RECOVER``: recovers a cluster;
-* ``REPLACE_NODES``: replaces the nodes in a cluster with specified nodes;
-* ``OPERATION``: performs an operation on the specified cluster;
-
-A node object supports the following actions:
-
-* ``CREATE``: creates a node;
-* ``DELETE``: deletes a node;
-* ``UPDATE``: updates the properties and/or the profile used by a node;
-* ``CHECK``: checks a node;
-* ``RECOVER``: recovers a node;
-* ``OPERATION``: performs an operation on the specified node;
-
-In the future, Senlin may support user-defined actions (UDAs).
-
-
-Listing Actions
-~~~~~~~~~~~~~~~
-
-The following command shows the actions known by the Senlin engine::
-
-  $ openstack cluster action list
-  +----------+-------------------------+----------------+-----------+----------+------------+-------------+----------------------+
-  | id       | name                    | action         | status    | target_id| depends_on | depended_by | created_at           |
-  +----------+-------------------------+----------------+-----------+----------+------------+-------------+----------------------+
-  | 1189f5e8 | node_create_b825fb74    | NODE_CREATE    | SUCCEEDED | b825fb74 |            |             | 2016-09-22T10:13:24Z |
-  | 2454c28a | node_delete_c035c519    | NODE_DELETE    | SUCCEEDED | c035c519 |            |             | 2016-09-22T10:53:09Z |
-  | 252b9491 | node_create_c035c519    | NODE_CREATE    | SUCCEEDED | c035c519 |            |             | 2016-09-22T10:54:09Z |
-  | 34802f3b | cluster_create_7f37e191 | CLUSTER_CREATE | SUCCEEDED | 7f37e191 |            |             | 2016-09-22T11:04:00Z |
-  | 4250bf29 | cluster_delete_7f37e191 | CLUSTER_DELETE | SUCCEEDED | 7f37e191 |            |             | 2016-09-22T11:06:32Z |
-  | 67cbcfb5 | node_delete_b825fb74    | NODE_DELETE    | SUCCEEDED | b825fb74 |            |             | 2016-09-22T11:14:04Z |
-  | 6e661db8 | cluster_create_44762dab | CLUSTER_CREATE | SUCCEEDED | 44762dab |            |             | 2016-09-22T11:14:44Z |
-  | 7bfad7ed | node_delete_b716052d    | NODE_DELETE    | SUCCEEDED | b716052d |            |             | 2016-09-22T11:15:22Z |
-  | b299cf44 | cluster_delete_44762dab | CLUSTER_DELETE | SUCCEEDED | 44762dab |            |             | 2016-09-22T11:18:18Z |
-  | e973552e | node_create_b716052d    | NODE_CREATE    | SUCCEEDED | b716052d |            |             | 2016-09-22T11:25:58Z |
-  +----------+-------------------------+----------------+-----------+----------+------------+-------------+----------------------+
-
-The :program:`openstack cluster` command line supports various options when
-listing the actions.
-
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing actions,
-using the option :option:`--sort`. The :option:`--sort` option accepts a
-string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys used
-are action properties and the dirs can be one of ``asc`` and ``desc``. When
-omitted, Senlin sorts a given key using ``asc`` as the default direction. 
-
-For example, the following command instructs the :program:`openstack cluster`
-command to sort actions using the ``name`` property in descending order::
-
-  $ openstack cluster action list --sort name:desc
-
-When sorting the list of actions, you can use one of ``name``, ``target``,
-``action``, ``created_at`` and ``status``.
-
-
-Filtering the List
-------------------
-
-You can filter the list of actions using the option :option:`--filters`. For
-example, the following command filters the action list by the ``action``
-property::
-
-  $ openstack cluster action list --filters action=CLUSTER_SCALE_OUT
-
-The option :option:`--filters` accepts a list of key-value pairs separated by
-semicolon (``;``), where each pair is expected to be of format ``key=val``.
-The valid keys for filtering include ``name``, ``target``, ``action`` and
-``status`` or any combination of them.
-
-
-Paginating the Query Results
-----------------------------
-
-In case you have a huge collection of actions (which is highly likely the
-case), you can limit the number of actions returned using the option
-:option:`--limit `. For example::
-
-  $ openstack cluster action list --limit 1
-
-Another option you can specify is the ID of an action after which you want
-the returned list to start. In other words, you don't want to see those
-actions with IDs that are or come before the one you specify. You can use the
-option :option:`--marker ` for this purpose. For example::
-
-  $ openstack cluster action list --limit 1 \
-      --marker 2959122e-11c7-4e82-b12f-f49dc5dac270
-
-Only 1 action record is returned in this example and its UUID comes after the
-one specified from the command line.
-
-
-Showing Details of an Action
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can use the :program:`openstack cluster` command to show the details about
-an action you are interested in. When specifying the identity of the action,
-you can use its name, its ID or its "short ID". Senlin API and engine will
-verify if the identifier you specified can uniquely identify an action. An
-error message will be returned if there is no action matching the identifier
-or if more than one action matches it.
-
-An example is shown below::
-
-  $ openstack cluster action show 8fac487f
-  +---------------+--------------------------------------+
-  | Field         | Value                                |
-  +---------------+--------------------------------------+
-  | action        | CLUSTER_DELETE                       |
-  | cause         | RPC Request                          |
-  | created_at    | 2016-09-23T09:00:25Z                 |
-  | depended_by   |                                      |
-  | depends_on    |                                      |
-  | domain_id     | None                                 |
-  | end_at        | 1450683904.0                         |
-  | id            | 8fac487f-861a-449e-9678-478133bea8de |
-  | inputs        | {}                                   |
-  | interval      | -1                                   |
-  | location      | None                                 |
-  | name          | cluster_delete_7deb546f              |
-  | outputs       | {}                                   |
-  | owner_id      | None                                 |
-  | project_id    | bdeecc1b58004bb19302da77ac056b44     |
-  | start_at      | 1450683904.0                         |
-  | status        | SUCCEEDED                            |
-  | status_reason | Action completed successfully.       |
-  | target_id     | 7deb546f-fd1f-499a-b120-94f8f07fadfb |
-  | timeout       | 3600                                 |
-  | updated_at    | None                                 |
-  | user_id       | f3cdb8010bb349d5bdff2815d8f007a1     |
-  +---------------+--------------------------------------+
-
-
-See Also
-~~~~~~~~
-
-* :doc:`Creating Receivers `
-* :doc:`Browsing Events `
diff --git a/doc/source/user/bindings.rst b/doc/source/user/bindings.rst
deleted file mode 100644
index e6db0f412..000000000
--- a/doc/source/user/bindings.rst
+++ /dev/null
@@ -1,174 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. 
You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-bindings:
-
-=======================
-Cluster-Policy Bindings
-=======================
-
-Concept
-~~~~~~~
-
-A :term:`Policy` object can be attached to more than one :term:`Cluster` at
-the same time, and a cluster can have more than one Policy object attached to
-it at any time.
-
-After a policy object is attached to a cluster, you can still enable or
-disable it or update some properties of the policy object.
-
-
-Listing Policies Attached to a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The :program:`openstack cluster` command provides a sub-command
-:command:`policy binding list` to list policy objects that are attached to a
-cluster. You can provide the name, the ID or the "short ID" of a cluster as
-the identifier to reference a cluster. For example, the command below lists
-the policies attached to the cluster ``webservers``::
-
-  $ openstack cluster policy binding list webservers
-
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing cluster
-policies, using the option :option:`--sort`. The :option:`--sort` option
-accepts a string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the
-keys used are properties of the policy bound to a cluster and the dirs can be
-one of ``asc`` and ``desc``. When omitted, Senlin sorts a given key using
-``asc`` as the default direction.
-
-For example, the following command line sorts the policy bindings using the
-``enabled`` property in descending order::
-
-  $ openstack cluster policy binding list --sort enabled:desc c3
-
-When sorting the list of policies, ``enabled`` is the only key you can specify
-for sorting.
-
-
-Filtering the List
-------------------
-
-The :program:`openstack cluster` command also supports options for filtering
-the policy list at the server side. The option :option:`--filters` can be used
-for this purpose. For example, the following command filters clusters by the
-``is_enabled`` field::
-
-  $ openstack cluster policy binding list --filters enabled=True c3
-  +-----------+-------------+---------------------------+------------+
-  | policy_id | policy_name | policy_type               | is_enabled |
-  +-----------+-------------+---------------------------+------------+
-  | 0705f0f4  | up01        | senlin.policy.scaling-1.0 | True       |
-  +-----------+-------------+---------------------------+------------+
-
-The option :option:`--filters` accepts a list of key-value pairs separated by
-semicolon (``;``), where each key-value pair is expected to be of format
-``=``. The only key that can be used for filtering as of today is
-``enabled``.
-
-
-Attaching a Policy to a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Senlin permits policy objects to be attached to clusters and to be detached
-from clusters dynamically. When attaching a policy object to a cluster, you
-can customize the policy properties for the particular cluster. For example,
-you can specify whether the policy should be enabled once attached.
-
-The following options are supported for the command
-:command:`openstack cluster policy attach`:
-
-- :option:`--enabled`: a boolean indicating whether the policy is to be
-  enabled once attached.
-
-When a policy is attached to a cluster, it is enabled by default. To keep it
-disabled, you can pass ``--enabled False``. For example, the following
-command attaches a policy named ``up01`` to the cluster ``c3`` and keeps it
-disabled::
-
-  $ openstack cluster policy attach --policy up01 --enabled False c3
-
-Note that most of the time, Senlin doesn't allow more than one policy of the
-same type to be attached to the same cluster. This restriction is relaxed for
-some policy types. For example, when working with scaling policies, you can
-actually attach more than one policy instance to the same cluster, each of
-which targets a specific scenario.
-
-For the identifiers specified for the cluster and the policy, you can use the
-name, the ID or the "short ID" of an object. The Senlin engine will try to
-make a guess in each case. If no entity matches the specified identifier, or
-more than one entity matches it, you will get an error message.
-
-
-Showing Policy Properties on a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To examine the detailed properties of a policy object that has been attached
-to a cluster, you can use the :command:`openstack cluster policy binding show`
-command with the policy identifier and the cluster identifier specified. For
-example::
-
-  $ openstack cluster policy binding show --policy dp01 c3
-  +--------------+--------------------------------------+
-  | Field        | Value                                |
-  +--------------+--------------------------------------+
-  | cluster_name | c3                                   |
-  | data         | None                                 |
-  | id           | 2b7e9294-b5cd-470f-b191-b18f7e672495 |
-  | is_enabled   | True                                 |
-  | location     | None                                 |
-  | name         | None                                 |
-  | policy_id    | 239d7212-6196-4a89-9446-44d28717d7de |
-  | policy_name  | dp01                                 |
-  | policy_type  | senlin.policy.deletion-1.0           |
-  +--------------+--------------------------------------+
-
-You can use the name, the ID or the "short ID" of a policy and/or a cluster
-to name the objects.
-
-
-Updating Policy Properties on a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Once a policy is attached to a cluster, you can request that its properties
-on this cluster be changed by using the command
-:command:`openstack cluster policy binding update`. Presently, you can only
-specify the ``enabled`` property to be updated.
-
-For example, the following command disables a policy on the specified
-cluster::
-
-  $ openstack cluster policy binding update \
-      --enabled False --policy dp01 \
-      mycluster
-
-The Senlin engine will validate the arguments in the same way as it does for
-the policy attach operation. You can use the name, the ID or the "short ID"
-of an entity to reference it, just as you do with the policy attach
-operation.
-
-
-Detaching a Policy from a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Finally, to remove the binding between a specified policy object and a
-cluster, you can use the :command:`openstack cluster policy detach` command
-as shown below::
-
-  $ openstack cluster policy detach --policy dp01 mycluster
-
-This command detaches the specified policy from the specified cluster. You
-use the option :option:`--policy` to specify the policy.
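-
-For readers who drive Senlin programmatically rather than through the CLI,
-the same binding operations are exposed by the ``openstacksdk`` clustering
-proxy. The snippet below is an illustrative sketch only: it assumes a
-``clouds.yaml`` entry named ``mycloud``, and the proxy method names
-(``attach_policy_to_cluster``, ``cluster_policies``,
-``update_cluster_policy``, ``detach_policy_from_cluster``) may vary between
-SDK releases, so check the SDK documentation for your version::
-
-  # Illustrative sketch: manage cluster-policy bindings via openstacksdk.
-  import openstack
-
-  conn = openstack.connect(cloud='mycloud')
-
-  # Attach the policy in a disabled state first.
-  conn.clustering.attach_policy_to_cluster('c3', 'up01', enabled=False)
-
-  # List the current bindings to verify the attachment.
-  for binding in conn.clustering.cluster_policies('c3'):
-      print(binding.policy_name, binding.is_enabled)
-
-  # Enable the policy once the cluster is ready, then detach it.
-  conn.clustering.update_cluster_policy('c3', 'up01', enabled=True)
-  conn.clustering.detach_policy_from_cluster('c3', 'up01')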
diff --git a/doc/source/user/clusters.rst b/doc/source/user/clusters.rst
deleted file mode 100644
index f676b0638..000000000
--- a/doc/source/user/clusters.rst
+++ /dev/null
@@ -1,503 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-clusters:
-
-========
-Clusters
-========
-
-Concept
-~~~~~~~
-
-A :term:`Cluster` is a group of logical objects, each of which is called a
-:term:`Node` in Senlin's terminology. A cluster can contain zero or more
-nodes. A cluster has a ``profile_id`` property that specifies which default
-:term:`Profile` to use when new nodes are created/scaled as members of the
-cluster. It is valid for nodes in a cluster to reference different profile
-objects because Senlin only mandates that all nodes in a cluster have the
-same **profile type**.
-
-Senlin provides APIs and command line support to manage cluster membership.
-Please refer to :ref:`ref-membership` for details. Senlin also supports
-attaching :term:`Policy` objects to a cluster, customizing the policy
-properties when attaching a policy to a cluster. Please refer to
-:ref:`ref-bindings` for details.
-
-Listing Clusters
-~~~~~~~~~~~~~~~~
-
-The following command shows the clusters managed by the Senlin service::
-
-  $ openstack cluster list
-  +----------+------+--------+----------------------+------------+
-  | id       | name | status | created_at           | updated_at |
-  +----------+------+--------+----------------------+------------+
-  | 2959122e | c1   | ACTIVE | 2015-05-05T13:27:28Z | None       |
-  | 092d0955 | c2   | ACTIVE | 2015-05-05T13:27:48Z | None       |
-  +----------+------+--------+----------------------+------------+
-
-Note that the first column in the output table is a *short ID* of a cluster
-object. The Senlin command line uses short IDs to save real estate on screen
-so that more useful information can be shown on a single line. To show the
-*full ID* in the list, you can add the :option:`--full-id` option to the
-command::
-
-  $ openstack cluster list --full-id
-  +--------------------+------+--------+----------------------+------------+
-  | id                 | name | status | created_at           | updated_at |
-  +--------------------+------+--------+----------------------+------------+
-  | 2959122e-11c7-.... | c1   | ACTIVE | 2015-05-05T13:27:28Z | None       |
-  | 092d0955-2645-.... | c2   | ACTIVE | 2015-05-05T13:27:48Z | None       |
-  +--------------------+------+--------+----------------------+------------+
-
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing clusters,
-using the option :option:`--sort`. The :option:`--sort` option accepts a
-string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys used
-are cluster properties and the dirs can be one of ``asc`` and ``desc``. When
-omitted, Senlin sorts a given key using ``asc`` as the default direction.
-
-For example, the following command sorts the clusters using the ``name``
-property in descending order::
-
-  $ openstack cluster list --sort name:desc
-
-When sorting the list of clusters, you can use one of ``name``, ``status``,
-``init_at``, ``created_at`` and ``updated_at``.
-
-
-Filtering the List
-------------------
-
-The :program:`openstack cluster list` command also provides options for
-filtering the cluster list at the server side. The option :option:`--filters`
-can be used for this purpose. For example, the following command filters the
-clusters by the ``status`` field::
-
-  $ openstack cluster list --filters status=ACTIVE
-  +----------+------+--------+----------------------+------------+
-  | id       | name | status | created_at           | updated_at |
-  +----------+------+--------+----------------------+------------+
-  | 2959122e | c1   | ACTIVE | 2015-05-05T13:27:28Z | None       |
-  | 092d0955 | c2   | ACTIVE | 2015-05-05T13:27:48Z | None       |
-  +----------+------+--------+----------------------+------------+
-
-The option :option:`--filters` accepts a list of key-value pairs separated by
-semicolon (``;``), where each key-value pair is expected to be of format
-``<key>=<value>``. The valid keys for filtering include: ``status``,
-``name``, ``project`` and ``user``.
-
-
-Paginating the Query Results
-----------------------------
-
-In case you have a huge collection of clusters, you can limit the number of
-clusters returned from the Senlin server each time, using the option
-:option:`--limit <limit>`. For example::
-
-  $ openstack cluster list --limit 1
-  +----------+------+--------+----------------------+------------+
-  | id       | name | status | created_at           | updated_at |
-  +----------+------+--------+----------------------+------------+
-  | 2959122e | c1   | ACTIVE | 2015-05-05T13:27:28Z | None       |
-  +----------+------+--------+----------------------+------------+
-
-Another option you can specify is the ID of a cluster after which you want
-the returned list to start. In other words, you don't want to see those
-clusters whose IDs are or come before the one you specify. You can use the
-option :option:`--marker <id>` for this purpose. For example::
-
-  $ openstack cluster list --limit 1 \
-      --marker 2959122e-11c7-4e82-b12f-f49dc5dac270
-  +----------+------+--------+----------------------+------------+
-  | id       | name | status | created_at           | updated_at |
-  +----------+------+--------+----------------------+------------+
-  | 092d0955 | c2   | ACTIVE | 2015-05-05T13:27:48Z | None       |
-  +----------+------+--------+----------------------+------------+
-
-Only 1 cluster record is returned in this example and its UUID comes after
-the one specified from the command line.
-
-
-Creating a Cluster
-~~~~~~~~~~~~~~~~~~
-
-To create a cluster, you need to provide the ID or name of the profile to
-be associated with the cluster. For example::
-
-  $ openstack cluster create --profile qstack c3
-  +------------------+--------------------------------------+
-  | Property         | Value                                |
-  +------------------+--------------------------------------+
-  | config           | {}                                   |
-  | created_at       | None                                 |
-  | data             | {}                                   |
-  | dependents       | {}                                   |
-  | desired_capacity | 0                                    |
-  | domain_id        | None                                 |
-  | id               | 60424eb3-6adf-4fc3-b9a1-4a035bf171ac |
-  | init_at          | 2015-05-05T13:35:47Z                 |
-  | location         | None                                 |
-  | max_size         | -1                                   |
-  | metadata         | {}                                   |
-  | min_size         | 0                                    |
-  | name             | c3                                   |
-  | node_ids         |                                      |
-  | profile_id       | bf38dc9f-d204-46c9-b515-79caf1e45c4d |
-  | profile_name     | qstack                               |
-  | project_id       | 333acb15a43242f4a609a27cb097a8f2     |
-  | status           | INIT                                 |
-  | status_reason    | Initializing                         |
-  | timeout          | 3600                                 |
-  | updated_at       | None                                 |
-  | user_id          | 0b82043b57014cd58add97a2ef79dac3     |
-  +------------------+--------------------------------------+
-
-From the output you can see that a new cluster object was created and put
-into ``INIT`` status. Senlin will verify that the profile specified using
-the option :option:`--profile <profile>` does exist. The server allows the
-``<profile>`` value to be a profile name, a profile ID or the short ID of a
-profile object. If the profile is not found, or multiple profiles are found
-matching the value, you will receive an error message.
-
-
-Controlling Cluster Capacity
-----------------------------
-
-When creating a cluster, by default :program:`senlin` will create a cluster
-with no nodes, i.e. the ``desired_capacity`` will be set to 0. However, you
-can specify the desired capacity of the cluster, the maximum size and/or the
-minimum size of the cluster. The default value for ``min_size`` is 0 and the
-default value for ``max_size`` is -1, meaning that there is no upper bound
-for the cluster size.
-
-The following command creates a cluster named "``test_cluster``", with its
-desired capacity set to 2, its minimum size set to 1 and its maximum size
-set to 3::
-
-  $ openstack cluster create --desired-capacity 2 \
-      --min-size 1 --max-size 3 \
-      --profile myprofile \
-      test_cluster
-
-The Senlin API and Senlin engine will validate the settings for these
-capacity arguments when receiving this request. An error message will be
-returned if the arguments fail to pass this validation; otherwise, the
-cluster creation request will be queued as an action for execution.
-
-When neither ``desired_capacity`` nor ``min_size`` is specified, the Senlin
-engine will create an empty cluster. When either ``desired_capacity`` or
-``min_size`` is specified, Senlin will start the process of creating nodes
-immediately after the cluster object is created.
-
-
-Other Properties
-----------------
-
-You can use the option :option:`--metadata` (or :option:`-M`) to associate
-some key-value pairs with the cluster to be created. These data are referred
-to as the "metadata" for the cluster.
-
-Since cluster operations may take some time to finish and Senlin interacts
-with backend services to make them happen, there needs to be a way to verify
-whether an operation has timed out. When creating a cluster using the
-:program:`openstack cluster create` command line, you can use the option
-:option:`--timeout <seconds>` to specify the default timeout in number of
-seconds. This value is the global setting for the cluster.
-
-You can use the option :option:`--config` to pass in key-value pairs to the
-cluster to be created. The following config properties are supported:
-
-- ``node.name.format``: Specifies how cluster nodes are automatically named.
-  The value can contain placeholders like ``$nI`` for the node index padded
-  with n zeros to the left, or ``$nR`` for a random string of length n.
-
-- ``cluster.stop_node_before_delete``: If set to True, cluster operations
-  that result in a node deletion (e.g. scale-in or resize) will request a
-  node stop first. Once the node has been successfully shut down, the node
-  is deleted. The default setting is False, in which case a cluster performs
-  a node delete without stopping the node.
-
-- ``cluster.stop_timeout_before_update``: Specifies the timeout value in
-  seconds to wait for when stopping a node as part of a cluster update
-  operation. If this value is not set, the value for
-  ``default_nova_timeout`` in the configuration will be used.
-
-
-Showing Details of a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When there are clusters in the Senlin database, you can request Senlin to
-show the details about a cluster you are interested in.
-
-You can use the name, the ID or the "short ID" of a cluster to identify the
-cluster to show. The Senlin API and engine will verify if the identifier you
-specified can uniquely identify a cluster. An error message will be returned
-if there is no cluster matching the identifier or if more than one cluster
-matches it.
-
-An example is shown below::
-
-  $ openstack cluster show c3
-  +------------------+--------------------------------------+
-  | Field            | Value                                |
-  +------------------+--------------------------------------+
-  | config           | {}                                   |
-  | created_at       | 2015-07-07T03:30:53Z                 |
-  | data             | {}                                   |
-  | dependents       | {}                                   |
-  | desired_capacity | 2                                    |
-  | domain_id        | None                                 |
-  | id               | 2b7e9294-b5cd-470f-b191-b18f7e672495 |
-  | init_at          | 2015-05-07T03:30:52Z                 |
-  | location         | None                                 |
-  | max_size         | -1                                   |
-  | metadata         | {}                                   |
-  | min_size         | 0                                    |
-  | name             | c3                                   |
-  | node_ids         | b28692a5-2536-4921-985b-1142d6045e1f |
-  |                  | 4be10a88-e340-4518-a9e1-d742c53ac37f |
-  | profile_id       | bf38dc9f-d204-46c9-b515-79caf1e45c4d |
-  | profile_name     | qstack                               |
-  | project_id       | 333acb15a43242f4a609a27cb097a8f2     |
-  | status           | ACTIVE                               |
-  | status_reason    | Node stack2: Creation succeeded      |
-  | timeout          | 3600                                 |
-  | updated_at       | None                                 |
-  | user_id          | 0b82043b57014cd58add97a2ef79dac3     |
-  +------------------+--------------------------------------+
-
-From the result, you can examine the list of nodes (if any) that are members
-of this cluster.
-
-
-Updating a Cluster
-~~~~~~~~~~~~~~~~~~
-
-Once a cluster has been created, you can change its properties using the
-:program:`openstack cluster update` command. For example, to change the name
-of a cluster, you can use the following command::
-
-  $ openstack cluster update --name web_bak web_servers
-
-You can change the ``timeout`` property using the option :option:`--timeout`.
-You can change the metadata associated with a cluster using the option
-:option:`--metadata`.
-
-Using the :command:`openstack cluster update` command, you can change the
-profile used by the cluster and its member nodes. The following example
-launches a global update on the cluster for switching to a different
-profile::
-
-  $ openstack cluster update --profile fedora21_server web_cluster
-
-Suppose the cluster ``web_cluster`` is now using a profile of type
-``os.nova.server`` where a Fedora 20 image is used; the command above will
-initiate a global upgrade to a new profile where a Fedora 21 image is used.
-
-The Senlin engine will verify whether the new profile has the same profile
-type as that of the existing one and whether the new profile has a
-well-formed ``spec`` property. If everything is fine, the engine will start
-a node-level profile update process. The node-level update operation is
-subject to policy checking/enforcement when there is an update policy
-attached to the cluster. Please refer to :ref:`ref-policies` and
-:ref:`ref-bindings` for more information.
-
-
-Resizing a Cluster
-~~~~~~~~~~~~~~~~~~
-
-The :program:`openstack cluster` command line supports several different
-sub-commands to resize a cluster.
-
-
-``openstack cluster resize``
-----------------------------
-
-The command :command:`openstack cluster resize` takes several arguments that
-allow you to resize a cluster in various ways:
-
-- you can change the size of a cluster to a specified number;
-- you can add a specified number of nodes to a cluster or remove a specified
-  number of nodes from a cluster;
-- you can instruct :program:`openstack cluster resize` to resize a cluster
-  by a specified percentage;
-- you can tune the ``min_size`` and/or ``max_size`` property of a cluster
-  when resizing it;
-- you can request a size change made on a best-effort basis: if the resize
-  operation cannot be fully realized due to some restrictions, this argument
-  tells the Senlin engine whether it is still expected to partially realize
-  the resize operation.
-
-You can specify one and only one of the following options for the
-:command:`openstack cluster resize` command:
-
-- use :option:`--capacity <capacity>` to specify the exact value of the new
-  cluster size;
-- use :option:`--adjustment <adjustment>` to specify the relative number of
-  nodes to add/remove;
-- use :option:`--percentage <percentage>` to specify the percentage of
-  cluster size change.
-
-The following command resizes the cluster ``test_cluster`` to 2 nodes,
-provided that the ``min_size`` is less than or equal to 2 and the
-``max_size`` is either no less than 2 or equal to -1 (indicating that there
-is no upper bound for the cluster size). This command makes use of the
-option :option:`--capacity <capacity>`, where ``<capacity>`` is the new size
-of the cluster::
-
-  $ openstack cluster resize --capacity 2 test_cluster
-
-Another way to resize a cluster is by specifying the option
-:option:`--adjustment <adjustment>`, where ``<adjustment>`` can be a
-positive or a negative integer giving the number of nodes to add or remove
-respectively. For example, the following command adds two nodes to the
-specified cluster::
-
-  $ openstack cluster resize --adjustment 2 test_cluster
-
-The following command removes two nodes from the specified cluster::
-
-  $ openstack cluster resize --adjustment -2 test_cluster
-
-Yet another way to resize a cluster is by specifying the size change in
-percentage. You will use the option :option:`--percentage <percentage>` for
-this purpose. The ``<percentage>`` value can be either a positive or a
-negative float value giving the percentage of cluster size change. For
-example, the following command increases the cluster size by 30%::
-
-  $ openstack cluster resize --percentage 30 test_cluster
-
-The following command decreases the cluster size by 25%::
-
-  $ openstack cluster resize --percentage -25 test_cluster
-
-The Senlin engine computes the actual number of nodes to add or to remove
-based on the current size of the cluster, the specified percentage value and
-the constraints (i.e. the ``min_size`` and the ``max_size`` properties).
-
-When computing the new capacity for the cluster, the Senlin engine will
-determine the value based on the following rules:
-
-- If the value of the new capacity is greater than 1.0 or less than -1.0, it
-  will be rounded to the integer part of the value. For example, 3.4 will be
-  rounded to 3, and -1.9 will be rounded to -1;
-- If the value of the new capacity is between 0 and 1, Senlin will round it
-  up to 1;
-- If the value of the new capacity is between 0 and -1, Senlin will round it
-  down to -1;
-- The new capacity should be in the range of ``min_size`` and ``max_size``,
-  inclusively, unless the option :option:`--strict` is specified;
-- The range checking will be performed against the current size constraints
-  if no new value for ``min_size`` and/or ``max_size`` is given, or else
-  Senlin will first verify the new size constraints and perform range
-  checking against the new constraints;
-- If the option :option:`--min-step <number>` is specified, the ``<number>``
-  value will be used if the absolute value of the new capacity value is less
-  than ``<number>``.
-
-(These rules are restated as a short Python sketch later in this chapter.)
-
-If the option :option:`--strict` is specified, Senlin will strictly conform
-to the cluster size constraints. If the capacity value falls out of the
-range, the request will be rejected. When :option:`--strict` is set to
-``False``, the Senlin engine will do a resize on a best-effort basis.
-
-Suppose we have a cluster A with ``min_size`` set to 5 and its current size
-is 7. If the new capacity value is 4 and the option :option:`--strict` is
-set to ``True``, the request will be rejected with an error message. If the
-new capacity value is 4 and the option :option:`--strict` is not set, Senlin
-will try to resize the cluster to 5 nodes.
-
-Along with the :command:`openstack cluster resize` command, you can specify
-the new size constraints using either the option :option:`--min-size`, the
-option :option:`--max-size` or both.
-
-
-``openstack cluster shrink`` and ``openstack cluster expand``
---------------------------------------------------------------
-
-The :command:`openstack cluster shrink` command and the
-:command:`openstack cluster expand` command are provided for convenience
-when you want to remove a specific number of nodes from a cluster or add a
-specific number of nodes to a cluster, respectively. These two commands both
-take an argument ``<count>``, which is a positive integer representing the
-number of nodes to add or remove. For example, the following command adds
-two nodes to the ``web_servers`` cluster::
-
-  $ openstack cluster expand --count 2 web_servers
-
-The following command removes two nodes from the ``web_servers`` cluster::
-
-  $ openstack cluster shrink --count 2 web_servers
-
-The option :option:`--count <count>` is optional. If this option is
-specified, Senlin will use it for the cluster size change, even when there
-are scaling policies attached to the cluster. If this option is omitted,
-however, Senlin will treat it as implicitly set to 1.
-
-
-Checking a Cluster
-~~~~~~~~~~~~~~~~~~
-
-A cluster can be checked using the :command:`openstack cluster check`
-command, for example::
-
-  $ openstack cluster check mycluster
-
-All nodes belonging to the specified cluster will perform the check
-operation. If a node's physical resource is not ACTIVE, the node status will
-be changed as part of the check operation.
-
-
-Recovering a Cluster
-~~~~~~~~~~~~~~~~~~~~
-
-A cluster can be recovered using the :command:`openstack cluster recover`
-command, for example::
-
-  $ openstack cluster recover mycluster --check true
-
-The option :option:`--check <boolean>` is optional. If this option is set,
-the cluster will perform a check operation before doing the recovery. The
-restore operation will delete nodes from the specified cluster and recreate
-them.
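-
-Before moving on, the capacity computation rules from the resize discussion
-above can be restated as a short Python sketch. This is an illustrative
-summary of the documented behavior only, not Senlin's actual implementation;
-the function name and signature are invented for the example::
-
-  # Illustrative sketch of the documented capacity computation for a
-  # percentage-based resize (not Senlin source code).
-  def compute_new_capacity(current, percentage, min_size=0, max_size=-1,
-                           min_step=None, strict=True):
-      delta = current * percentage / 100.0
-      # Values beyond (-1, 1) are truncated toward zero; values inside
-      # that range are rounded away from zero to +1 or -1.
-      if -1 < delta < 0:
-          delta = -1
-      elif 0 < delta < 1:
-          delta = 1
-      else:
-          delta = int(delta)
-      # Honor --min-step when the computed change is too small.
-      if min_step is not None and abs(delta) < min_step:
-          delta = min_step if delta > 0 else -min_step
-      new = current + delta
-      upper = max_size if max_size != -1 else float('inf')
-      if strict and not (min_size <= new <= upper):
-          raise ValueError('new capacity %d is out of range' % new)
-      # Best-effort mode: clamp the result into the valid range.
-      return int(max(min_size, min(new, upper)))
-
-For example, ``compute_new_capacity(7, -30, min_size=5)`` computes a delta
-of -2.1, truncates it to -2 and returns 5.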
-
-
-Deleting a Cluster
-~~~~~~~~~~~~~~~~~~
-
-A cluster can be deleted using the :command:`openstack cluster delete`
-command, for example::
-
-  $ openstack cluster delete mycluster
-
-Note that in this command you can use the name, the ID or the "short ID" to
-specify the cluster object you want to delete. If the specified criteria
-cannot match any clusters, you will get a ``ResourceNotFound`` exception. If
-more than one cluster matches the criteria, you will get a
-``MultipleChoices`` exception.
-
-When there are nodes in the cluster, the Senlin engine will launch a process
-to delete all nodes from the cluster and destroy them before deleting the
-cluster object itself.
-
-
-See Also
-~~~~~~~~
-
-There are other operations related to clusters. Please refer to the
-following links for operations related to cluster membership management and
-the creation and management of cluster-policy bindings:
-
-- :doc:`Managing Cluster Membership <membership>`
-- :doc:`Binding Policies to Clusters <bindings>`
-- :doc:`Examining Actions <actions>`
-- :doc:`Browsing Events <events>`
diff --git a/doc/source/user/events.rst b/doc/source/user/events.rst
deleted file mode 100644
index 7d9543c00..000000000
--- a/doc/source/user/events.rst
+++ /dev/null
@@ -1,226 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-events:
-
-======
-Events
-======
-
-An :term:`Event` is a record generated during engine execution. Such an
-event captures what has happened inside the senlin-engine. The senlin-engine
-service generates event records when it is performing some actions or
-checking policies.
-
-An event has a ``level`` property which can be interpreted as the severity
-level of the event:
-
-* 10: interpreted as ``DEBUG`` level. Events at this level can be safely
-  ignored by users. For developers they may provide some useful information
-  for debugging the code.
-* 20: interpreted as ``INFO`` level. Events at this level are mostly about
-  notifying that some operations have been successfully performed.
-* 30: interpreted as ``WARNING`` level. Events at this level are used to
-  signal some unhealthy status or anomalies detected by the engine. These
-  events should be monitored and checked when operating a cluster.
-* 40: interpreted as ``ERROR`` level. Events at this level signify some
-  failures in engine operations. These events should be monitored and
-  checked when operating a cluster. Usually some user intervention is
-  expected to recover a cluster from this status.
-* 50: interpreted as ``CRITICAL`` level. Events at this level are about
-  serious problems encountered by the engine. The engine service may have
-  run into some bugs. User intervention is required to do a recovery.
-
-Event Dispatcher Configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Senlin provides an open architecture for event dispatching. Two of the
-built-in dispatchers are ``database`` and ``message``.
-
-1. The ``database`` dispatcher dumps the events into database tables and it
-is enabled by default.
-
-2. The ``message`` dispatcher converts the event objects into versioned
-event notifications and publishes them on the global message queue. This
-dispatcher is disabled by default. To enable it, you can add the following
-line to the ``[DEFAULT]`` section of the ``senlin.conf`` file and then
-restart the service engine::
-
-  [DEFAULT]
-  event_dispatchers = message
-
-Based on your deployment settings, you have to add the following lines to
-the ``senlin.conf`` file as well when using the ``message`` dispatcher.
-These lines set ``messaging`` as the default driver used by the
-``oslo.messaging`` package::
-
-  [oslo_messaging_notifications]
-  driver = messaging
-
-With this configuration, the ``database`` dispatcher will be disabled, which
-means you can only access the events via the message queue.
-
-3. The ``event_dispatchers`` option is a ``MultiString``; you can enable
-both the ``database`` and the ``message`` dispatchers if needed with the
-following configuration::
-
-  [DEFAULT]
-  event_dispatchers = database
-  event_dispatchers = message
-
-  [oslo_messaging_notifications]
-  driver = messaging
-
-Note that unprocessed event notifications, which are not associated with a
-TTL (time to live) value by default, will remain queued at the message bus,
-so please make sure the Senlin event notifications will be subscribed to and
-processed by some services before enabling the ``message`` dispatcher.
-
-By default, we use the ``senlin`` exchange, whose type is ``topic``, to
-route the notifications to queues with different routing keys. The queue
-names can be ``versioned_notifications.debug``,
-``versioned_notifications.info``, ``versioned_notifications.warn`` and
-``versioned_notifications.error``, depending on the log level you are using
-in ``senlin.conf``. The corresponding routing keys are the same as the queue
-names.
-
-There are two options to consume the notifications:
-
-- Consume the notifications from the default queues directly.
-- Declare your own queues, then bind them to the ``senlin`` exchange with
-  the corresponding ``routing_key`` to customize the flow.
-
-Since the event dispatchers are designed as plug-ins, you can develop your
-own event dispatchers and have the senlin engine load them on startup. For
-more details on developing and plugging in your own event dispatchers,
-please refer to the :doc:`../contributor/plugin_guide` document.
-
-The following sections are about examining events when using the
-``database`` dispatcher, which creates database records when events happen.
-
-
-Listing Events
-~~~~~~~~~~~~~~
-
-The following command lists the events generated by the Senlin engine::
-
-  $ openstack cluster event list
-  +----------+---------------------+---------------+----------+--------------+-----------------------+-----------+-------+------------+
-  | id       | generated_at        | obj_type      | obj_id   | obj_name     | action                | status    | level | cluster_id |
-  +----------+---------------------+---------------+----------+--------------+-----------------------+-----------+-------+------------+
-  | 1f72eb5e | 2015-12-17T15:41:48 | NODE          | 427e64f3 | node-7171... | update                | ACTIVE    | 20    |            |
-  | 20b8eb9a | 2015-12-17T15:41:49 | NODE          | 6da22a49 | node-7171... | update                | ACTIVE    | 20    |            |
-  | 23721815 | 2015-12-17T15:42:51 | NODEACTION    | 5e9a9d3d | node_dele... | NODE_DELETE           | START     | 20    |            |
-  | 54f9eae4 | 2015-12-17T15:41:36 | CLUSTERACTION | 1bffa11d | cluster_c... | CLUSTER_CREATE        | SUCCEEDED | 20    | 9f1883a7   |
-  | 7e30df62 | 2015-12-17T15:42:51 | CLUSTERACTION | d3cef701 | cluster_d... | CLUSTER_DELETE        | START     | 20    | 9f1883a7   |
-  | bf51f23c | 2015-12-17T15:41:54 | CLUSTERACTION | d4dbbcea | cluster_s... | CLUSTER_SCALE_OUT     | START     | 20    | 9f1883a7   |
-  | c58063e9 | 2015-12-17T15:42:51 | NODEACTION    | b2292bb1 | node_dele... | NODE_DELETE           | START     | 20    |            |
-  | ca7d30c6 | 2015-12-17T15:41:38 | CLUSTERACTION | 0be70b0f | attach_po... | CLUSTER_ATTACH_POLICY | START     | 20    | 9f1883a7   |
-  | cfe5d0d7 | 2015-12-17T15:42:51 | CLUSTERACTION | 42cf5baa | cluster_d... | CLUSTER_DELETE        | START     | 20    | 9f1883a7   |
-  | fe2fc810 | 2015-12-17T15:41:49 | CLUSTERACTION | 0be70b0f | attach_po... | CLUSTER_ATTACH_POLICY | SUCCEEDED | 20    | 9f1883a7   |
-  +----------+---------------------+---------------+----------+--------------+-----------------------+-----------+-------+------------+
-
-The :program:`openstack cluster event list` command line supports various
-options when listing the events.
-
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing events,
-using the option :option:`--sort`. The :option:`--sort` option accepts a
-string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys
-used are event properties and the dirs can be one of ``asc`` and ``desc``.
-When omitted, Senlin sorts a given key using ``asc`` as the default
-direction.
-
-For example, the following command sorts the events using the ``timestamp``
-property in descending order::
-
-  $ openstack cluster event list --sort timestamp:desc
-
-When sorting the list of events, you can use one of ``timestamp``,
-``level``, ``otype``, ``oname``, ``user``, ``action`` and ``status``.
-
-
-Filtering the List
-------------------
-
-You can filter the list of events using the option :option:`--filters`. For
-example, the following command filters the event list by the ``otype``
-property::
-
-  $ openstack cluster event list --filters otype=NODE
-
-The option :option:`--filters` accepts a list of key-value pairs separated
-by semicolon (``;``), where each pair is expected to be of format
-``key=val``. The valid keys for filtering include ``oname``, ``otype``,
-``oid``, ``cluster_id``, ``action``, ``level`` or any combination of them.
-
-
-Paginating the Query Results
-----------------------------
-
-In case you have a huge collection of events (which is highly likely the
-case), you can limit the number of events returned using the option
-:option:`--limit <limit>`. For example::
-
-  $ openstack cluster event list --limit 10
-
-Another option you can specify is the ID of an event after which you want
-the returned list to start. In other words, you don't want to see those
-events whose IDs are or come before the one you specify. You can use the
-option :option:`--marker <id>` for this purpose. For example::
-
-  $ openstack cluster event list --limit 20 \
-      --marker 2959122e-11c7-4e82-b12f-f49dc5dac270
-
-At most 20 event records will be returned in this example, and their UUIDs
-come after the one specified from the command line.
-
-
-Showing Details of an Event
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can use the :program:`openstack cluster` command line to show the
-details about an event you are interested in. When specifying the identity
-of the event, you can use its name, its ID or its "short ID". The Senlin API
-and engine will verify if the identifier you specified can uniquely identify
-an event. An error message will be returned if there is no event matching
-the identifier or if more than one event matches it.
-
-An example is shown below::
-
-  $ openstack cluster event show 19ba155a
-  +---------------+--------------------------------------+
-  | Field         | Value                                |
-  +---------------+--------------------------------------+
-  | action        | NODE_DELETE                          |
-  | cluster_id    | ce85d842-aa2a-4d83-965c-2cab5133aedc |
-  | generated_at  | 2015-12-17T15:43:26+00:00            |
-  | id            | 19ba155a-d327-490f-aa0f-589f67194b2c |
-  | level         | INFO                                 |
-  | location      | None                                 |
-  | name          | None                                 |
-  | obj_id        | cd9f519a-5589-4cbf-8a74-03b12fd9436c |
-  | obj_name      | node-ce85d842-003                    |
-  | obj_type      | NODE                                 |
-  | project_id    | 42d9e9663331431f97b75e25136307ff     |
-  | status        | end                                  |
-  | status_reason | Node deleted successfully.           |
-  | user_id       | 5e5bf8027826429c96af157f68dc9072     |
-  +---------------+--------------------------------------+
-
-
-See Also
-~~~~~~~~
-
-* :doc:`Operating Actions <actions>`
diff --git a/doc/source/user/membership.rst b/doc/source/user/membership.rst
deleted file mode 100644
index 138f8d676..000000000
--- a/doc/source/user/membership.rst
+++ /dev/null
@@ -1,165 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-membership:
-
-==================
-Cluster Membership
-==================
-
-Concept
-~~~~~~~
-
-A :term:`Node` can belong to at most one :term:`Cluster` at any time. A node
-is referred to as an *orphan node* when it doesn't belong to any cluster.
-
-A node can be made a member of a cluster at creation time, or you can change
-the cluster membership after the cluster and the node have been created.
-
-
-Listing Nodes in a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Using the command :command:`openstack cluster members list`, you can list the
-nodes that are members of a specific cluster. For example, to list nodes in
-cluster ``c3``, you can use the following command::
-
-  $ openstack cluster members list c3
-  +----------+--------+-------+--------+-------------+---------------------+
-  | id       | name   | index | status | physical_id | created_at          |
-  +----------+--------+-------+--------+-------------+---------------------+
-  | b28692a5 | stack1 | 1     | ACTIVE | fdf028a6    | 2015-07-07T05:23:40 |
-  | 4be10a88 | stack2 | 2     | ACTIVE | 7c87f545    | 2015-07-07T05:27:54 |
-  +----------+--------+-------+--------+-------------+---------------------+
-
-You can use the name, the ID or the "short ID" of a cluster as the argument
-for node listing. If the specified cluster identifier cannot match any
-cluster or it matches more than one cluster, you will get an error message.
-
-From the list, you can see the ``index``, ``status`` and ``physical_id`` of
-each node in this cluster. Note that the ``id`` field and the
-``physical_id`` field are shown as "short ID"s by default. If you want to
-see the full IDs, you can specify the :option:`--full-id` option::
-
-  $ openstack cluster members list --full-id c3
-  +------------...-+--------+-------+--------+-------------+-----------..-+
-  | id             | name   | index | status | physical_id | created_at   |
-  +------------...-+--------+-------+--------+-------------+-----------..-+
-  | b28692a5-25... | stack1 | 1     | ACTIVE | fdf0...     | 2015-07-07.. |
-  | 4be10a88-e3... | stack2 | 2     | ACTIVE | 7c87...     | 2015-07-07.. |
-  +------------...-+--------+-------+--------+-------------+-----------..-+
-
-If the cluster size is very large, you may want to list the nodes in pages.
-This can be achieved by using the :option:`--marker` option together with
-the :option:`--limit` option. The ``marker`` option value specifies a node
-ID after which you want the resulting list to start, and the ``limit``
-option value specifies the number of nodes you want to include in the
-resulting list. For example, the following command lists the nodes starting
-after a specific node ID with the length of the list set to 10::
-
-  $ openstack cluster members list --marker b28692a5 --limit 10 webservers
-
-Another useful option for listing nodes is the option
-:option:`--filters <filters>`. The option value accepts a string of format
-"``K1=V1;K2=V2...``", where "``K1``" and "``K2``" are node properties for
-checking, and "``V1``" and "``V2``" are values for filtering. The acceptable
-properties for filtering are ``name`` and ``status``. For example, the
-following command lists cluster nodes from a cluster based on whether a
-node's status is "``ACTIVE``"::
-
-  $ openstack cluster members list --filters status=ACTIVE webservers
-
-
-Specifying the Cluster When Creating a Node
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are several ways to make a node a member of a cluster. When creating a
-node using the command :command:`openstack cluster node create`, you can
-specify the option :option:`--cluster` to tell Senlin which cluster the new
-node belongs to. Please refer to :ref:`ref-nodes` for detailed instructions.
-
-
-Adding Node(s) to a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When you already have some nodes and some clusters, you can add some
-specified nodes to a specified cluster using the
-:command:`openstack cluster members add` command. For example, the following
-command adds two nodes to a cluster::
-
-  $ openstack cluster members add --nodes node3,node4 cluster1
-
-You can use the name, the ID or the "short ID" to name the node(s) to be
-added, and you can also use the name, the ID or the "short ID" to specify
-the cluster. When the identifiers you specify cannot match any existing
-nodes or clusters respectively, you will receive an error message. If an
-identifier matches more than one object, you will get an error message as
-well.
-
-Before the Senlin engine performs the cluster membership changes, it will
-verify if the nodes to be added have the same :term:`Profile Type` as the
-target cluster. If the profile types don't match, you will get an error
-message.
-
-When nodes are added to a cluster, they will get new ``index`` property
-values that can be used to uniquely identify them within the cluster.
-
-
-Removing Node(s) from a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The :program:`openstack cluster` command line also provides the command
-:command:`cluster members del` to remove node(s) from a cluster. In this
-case, you can use the name, the ID or the "short ID" to specify the node(s)
-and the cluster. The identifier specified must uniquely identify a node or a
-cluster object; otherwise you will get an error message indicating that the
-request was rejected. The following command removes two nodes from a
-cluster::
-
-  $ openstack cluster members del --nodes node21,node22 webservers
-
-When performing this operation, the Senlin engine will check if the
-specified nodes are actually members of the specified cluster. If any node
-from the specified node list does not belong to the target cluster, you
-will get an error message and the command fails.
-
-When nodes are removed from a cluster, they will get their ``index``
-property reset to -1.
-
-
-Replacing Node(s) in a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The :program:`openstack cluster` command line also provides the command
-:command:`cluster members replace` to replace node(s) in a cluster. The
-argument ``--nodes`` is used to describe the list of node pairs of the form
-``<OLD_NODE=NEW_NODE>``. ``OLD_NODE`` is the name or ID of a node to be
-replaced, and ``NEW_NODE`` is the name or ID of the node to use as the
-replacement. You can use the name, the ID or the "short ID" to specify the
-cluster. The identifier specified must uniquely identify a node or a cluster
-object; otherwise you will get an error message indicating that the request
-was rejected. The following command replaces node21 with node22::
-
-  $ openstack cluster members replace --nodes node21=node22 webservers
-
-When performing this operation, the Senlin engine will check if the replaced
-nodes are actually members of the specified cluster. If any node from the
-specified node list does not belong to the target cluster, you will get an
-error message and the command fails.
-
-When nodes are removed from the cluster, they will get their ``index``
-property reset to -1.
-
-
-See Also
-~~~~~~~~
-
-Below are links to documents related to clusters and nodes:
-
-- :doc:`Creating Clusters <clusters>`
-- :doc:`Creating Nodes <nodes>`
diff --git a/doc/source/user/nodes.rst b/doc/source/user/nodes.rst
deleted file mode 100644
index f3b43d686..000000000
--- a/doc/source/user/nodes.rst
+++ /dev/null
@@ -1,487 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-nodes:
-
-=====
-Nodes
-=====
-
-Concept
-~~~~~~~
-
-A :term:`Node` is a logical object managed by the Senlin service. A node can
-be a member of at most one cluster at any time. A node can be an orphan
-node, which means it doesn't belong to any cluster. Senlin provides APIs and
-command line support to manage a node's cluster membership. Please refer to
-:ref:`ref-membership` for details.
-
-A node has a ``profile_id`` property when created that specifies which
-:term:`Profile` to use when creating a physical object that backs the node.
-Please refer to :ref:`ref-profiles` for the creation and management of
-profile objects.
-
-
-Listing Nodes
-~~~~~~~~~~~~~
-
-To list nodes that are managed by the Senlin service, you will use the
-command :command:`openstack cluster node list`. For example::
-
-  $ openstack cluster node list
-  +----------+--------+-------+--------+------------+-------------+...
-  | id       | name   | index | status | cluster_id | physical_id |
-  +----------+--------+-------+--------+------------+-------------+...
-  | e1b39a08 | node1  | -1    | ACTIVE |            | 89ce0d2b    |
-  | 57962220 | node-3 | -1    | ACTIVE |            | 3386e306    |
-  | b28692a5 | stack1 | 1     | ACTIVE | 2b7e9294   | fdf028a6    |
-  | 4be10a88 | stack2 | 2     | ACTIVE | 2b7e9294   | 7c87f545    |
-  +----------+--------+-------+--------+------------+-------------+...
-
-Note that some columns in the output table are the *short ID* of objects.
-The Senlin command line uses short IDs to save real estate on screen so that
-more useful information can be shown on a single line. To show the *full ID*
-in the list, you can add the option :option:`--full-id` to the command.
-
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing nodes,
-using the option :option:`--sort`. The :option:`--sort` option accepts a
-string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys
-used are node properties and the dirs can be one of ``asc`` and ``desc``.
-When omitted, Senlin sorts a given key using ``asc`` as the default
-direction.
-
-For example, the following command sorts the nodes using the ``status``
-property in descending order::
-
-  $ openstack cluster node list --sort status:desc
-
-When sorting the list of nodes, you can use one of ``index``, ``name``,
-``status``, ``init_at``, ``created_at`` and ``updated_at``.
-
-
-Filtering the List
-------------------
-
-You can specify the option :option:`--cluster <cluster>` to list nodes that
-are members of a specific cluster. For example::
-
-  $ openstack cluster node list --cluster c3
-  +----------+---------+-------+--------+------------+-------------+...
-  | id       | name    | index | status | cluster_id | physical_id |
-  +----------+---------+-------+--------+------------+-------------+...
-  | b28692a5 | stack1  | 1     | ACTIVE | 2b7e9294   | fdf028a6    |
-  | 4be10a88 | stack2  | 2     | ACTIVE | 2b7e9294   | 7c87f545    |
-  +----------+---------+-------+--------+------------+-------------+...
-
-Besides these options, you can add the option :option:`--filters <filters>`
-to the command :command:`openstack cluster node list` to specify keys (node
-property names) and values you want to filter the list by. The valid keys
-for filtering are ``name`` and ``status``. For example, the command below
-filters the list by node status ``ACTIVE``::
-
-  $ openstack cluster node list --filters status=ACTIVE
-
-
-Paginating the List
--------------------
-
-In case you have a large number of nodes, you can limit the number of nodes
-returned from the Senlin server each time, using the option
-:option:`--limit <limit>`. For example::
-
-  $ openstack cluster node list --limit 1
-
-Another option you can specify is the ID of a node after which you want the
-returned list to start. In other words, you don't want to see those nodes
-whose IDs are or come before the one you specify. You can use the option
-:option:`--marker <id>` for this purpose. For example::
-
-  $ openstack cluster node list --marker 765385ed-f480-453a-8601-6fb256f512fc
-
-With the option :option:`--marker` and the option :option:`--limit`, you
-will be able to control how many node records you will get from each
-request.
-
-
-Creating a Node
-~~~~~~~~~~~~~~~
-
-To create a node, you need to specify the ID or name of the profile to be
For example, the following example creates a node named ``test_node`` -using a profile named ``pstack``:: - - $ openstack cluster node create --profile pstack test_node - +---------------+--------------------------------------+ - | Property | Value | - +---------------+--------------------------------------+ - | cluster_id | | - | created_at | None | - | data | {} | - | dependents | {} | - | details | None | - | domain_id | None | - | id | 1984b5a0-9dd7-4dda-b1e6-e8c1f640598f | - | index | -1 | - | init_at | 2015-07-09T11:41:18 | - | location | None | - | metadata | {} | - | name | test_node | - | physical_id | None | - | profile_id | 9b127538-a675-4271-ab9b-f24f54cfe173 | - | profile_name | pstack | - | project_id | 333acb15a43242f4a609a27cb097a8f2 | - | role | | - | status | INIT | - | status_reason | Initializing | - | updated_at | None | - | user_id | 5e5bf8027826429c96af157f68dc9072 | - +---------------+--------------------------------------+ - -When processing this request, Senlin engine will verify if the profile value -specified is a profile name, a profile ID or the short ID of a profile object. -If the profile is not found or multiple profiles found matching the value, you -will receive an error message. - -Note that the ``index`` property of the new node is -1. This is because we -didn't specify the owning cluster for the node. To join a node to an existing -cluster, you can either use the :command:`openstack cluster member add` -command (:ref:`ref-membership`) after the node is created, or specify the -owning cluster upon node creation, as shown by the following example:: - - $ openstack cluster node create --profile pstack --cluster c1 test_node - -The command above creates a new node using profile ``pstack`` and makes it a -member of the cluster ``c1``, specified using the option :option:`--cluster`. -When a node becomes a member of a cluster, it will get a value for its -``index`` property that uniquely identifies itself within the owning cluster. - -When the owning cluster is specified, Senlin engine will verify if the cluster -specified is referencing a profile that has the same :term:`Profile Type` as -that of the new node. If the profile types don't match, you will receive an -error message from the :command:`openstack cluster` command. - -Another argument that could be useful when creating a new node is the option -:option:`--role `. The value could be used by a profile type -implementation to treat nodes differently. For example, the following command -creates a node with a ``master`` role:: - - $ openstack cluster node create --profile pstack --cluster c1 \ - --role master master_node - -A profile type implementation may check this role value when operating the -physical object that backs the node. It is okay for a profile type -implementation to ignore this value. - -The last argument you can specify when creating a new node is the option -:option:`--metadata `. The value for this option is a list of -key-value pairs separated by a semicolon ('``;``'). These key-value pairs are -attached to the node and can be used for whatever purposes. For example:: - - $ openstack cluster node create --profile pstack \ - --metadata owner=JohnWhite test_node - - -Showing Details of a Node -~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can use the name, the ID or the "short ID" of a node to name a node for -show. The Senlin API and engine will verify if the identifier you specified -can uniquely identify a node. 
-if there is no node matching the identifier or if more than one node
-matches it.
-
-An example is shown below::
-
-  $ openstack cluster node show test_node
-  +---------------+--------------------------------------+
-  | Field         | Value                                |
-  +---------------+--------------------------------------+
-  | cluster_id    | None                                 |
-  | created_at    | 2015-07-09T11:41:20                  |
-  | data          | {}                                   |
-  | dependents    | {}                                   |
-  | details       | {}                                   |
-  | domain_id     | None                                 |
-  | id            | 1984b5a0-9dd7-4dda-b1e6-e8c1f640598f |
-  | index         | -1                                   |
-  | init_at       | 2015-07-09T11:41:18                  |
-  | location      | None                                 |
-  | metadata      | {}                                   |
-  | name          | test_node                            |
-  | physical_id   | 0e444642-b280-4c88-8be4-76ad0d158dac |
-  | profile_id    | 9b127538-a675-4271-ab9b-f24f54cfe173 |
-  | profile_name  | pstack                               |
-  | project_id    | 333acb15a43242f4a609a27cb097a8f2     |
-  | role          | None                                 |
-  | status        | ACTIVE                               |
-  | status_reason | Creation succeeded                   |
-  | updated_at    | None                                 |
-  | user_id       | 5e5bf8027826429c96af157f68dc9072     |
-  +---------------+--------------------------------------+
-
-From the output, you can see the ``physical_id`` of a node (if it has been
-successfully created). For different profile types, this value may be the ID
-of an object of a certain type. For example, if the profile type used is
-"``os.heat.stack``", this means the Heat stack ID; if the profile type used
-is "``os.nova.server``", it gives the Nova server ID.
-
-A useful argument for the command :command:`openstack cluster node show` is
-the option :option:`--details`. When specified, you will get the details
-about the physical object that backs the node. For example::
-
-  $ openstack cluster node show --details test_node
-
-
-Checking a Node
-~~~~~~~~~~~~~~~
-
-Once a node has been created, you can use the name, the ID or the "short ID"
-of a node to identify the node to check. The senlin-engine performs a
-profile-specific check operation to get the latest status of the physical
-resource (for example, a virtual machine). If the virtual machine status is
-not ACTIVE, the node will be set to ERROR status. For example::
-
-  $ openstack cluster node check node-biQA3BOM
-
-
-Recovering a Node
-~~~~~~~~~~~~~~~~~
-
-After a node has been created and running for a period of time, if the node
-goes into ERROR status, you can try to restore the node to ACTIVE status
-using the :command:`openstack cluster node recover` command. The restore
-operation will delete the specified node and recreate it. For example::
-
-  $ openstack cluster node recover node-biQA3BOM
-
-
-Updating a Node
-~~~~~~~~~~~~~~~
-
-Once a node has been created, you can change its properties using the
-command :command:`openstack cluster node update`. For example, to change the
-name of a node, you can use the option :option:`--name`, as shown by the
-following command::
-
-  $ openstack cluster node update --name new_node_name old_node_name
-
-Similarly, you can modify the ``role`` property of a node using the option
-:option:`--role`. For example::
-
-  $ openstack cluster node update --role slave master_node
-
-You can change the metadata associated with a node using the option
-:option:`--metadata`::
-
-  $ openstack cluster node update --metadata version=2.1 my_node
-
-Using the :command:`openstack cluster node update` command, you can change
-the profile used by a node. The following example updates a node to
The following example updates a node for switching to -use a different profile:: - - $ openstack cluster node update --profile fedora21_server fedora20_server - -Suppose the node ``fedora20_server`` is now using a profile of type -``os.nova.server`` where a Fedora 20 image is used, the command above will -initiate an upgrade to use a new profile with a Fedora 21 image. - -Senlin engine will verify whether the new profile has the same profile type -with that of the existing one and whether the new profile has a well-formed -``spec`` property. If everything is fine, the engine will start profile update -process. - - -Adopting a Node -~~~~~~~~~~~~~~~ - -In Senlin service, we can adopt an existing resource as a node and create a -profile for this node. To adopt a node, you need to specify the resource -physical ID by setting :option:`--identity ` and resource -profile_type name by setting :option:`--type `. For example, the -following example adopts a server with ID -``1177c8e8-8472-4e9d-8f15-1d4866b85b8b`` as a node named ``test_adopt_node``:: - - $ openstack cluster node adopt --identity \ - 1177c8e8-8472-4e9d-8f15-1d4866b85b8b --type os.nova.server-1.0 \ - --name test_adopt_node - +---------------+--------------------------------------+ - | Field | Value | - +---------------+--------------------------------------+ - | cluster_id | | - | created_at | 2017-08-16T07:52:50Z | - | data | {} | - | dependents | {} | - | details | None | - | domain_id | None | - | id | f88b1d7d-1e25-4362-987c-52f8aea26520 | - | index | -1 | - | init_at | 2017-08-16T07:52:50Z | - | location | None | - | metadata | {} | - | name | test_adopt_node | - | physical_id | 1177c8e8-8472-4e9d-8f15-1d4866b85b8b | - | profile_id | f9e5e3dd-d4f3-44a1-901e-351fa39e5801 | - | profile_name | prof-test_adopt_node | - | project_id | 138cf3f92bb3459da02363db8d53ac30 | - | role | | - | status | ACTIVE | - | status_reason | Node adopted successfully | - | updated_at | None | - | user_id | 67dc524bfb45492496c8ff7ecdedd394 | - +---------------+--------------------------------------+ - -The :option:`--name ` is optional, if omitted, Senlin engine will -generate a random name start with ``node-`` for the node. - -The option :option:`--role ` could be used by a profile type -implementation to treat nodes differently. For example, the following command -adopts a server as a node with a ``master`` role:: - - $ openstack cluster node adopt --identity \ - 1177c8e8-8472-4e9d-8f15-1d4866b85b8b --type os.nova.server-1.0 \ - --name test_adopt_node --role master - -The option :option:`--metadata ` is a list of -key-value pairs separated by a semicolon ('``;``'). These key-value pairs are -attached to the node and can be used for whatever purposes. For example:: - - $ openstack cluster node adopt --identity \ - 1177c8e8-8472-4e9d-8f15-1d4866b85b8b --type os.nova.server-1.0 \ - --name test_adopt_node --metadata "key1=value1;key2=value2" - -Another option :option:`--overrides ` support user to override -the node profile properties. For example, the following command can adopt a -server as a node and override the network properties in node's profile:: - - $ openstack cluster node adopt --identity \ - 1177c8e8-8472-4e9d-8f15-1d4866b85b8b \ - --type os.nova.server-1.0 \ - --override '{"networks":[{"network": "public"}]}' - -The option :option:`--snapshot ` is boolean type. If set, senlin -Senlin engine will create a snapshot for the resource before accept the -resource as a node. 
-
-
-Previewing a Node for Adoption
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A resource can be previewed before getting adopted as a Senlin node using the
-:command:`openstack cluster node adopt` command with the option
-:option:`--preview `. To preview a node, you need to specify the
-resource physical ID by setting :option:`--identity ` and the resource
-profile_type name by setting :option:`--type `. For example::
-
-  $ openstack cluster node adopt --preview \
-    --identity 1177c8e8-8472-4e9d-8f15-1d4866b85b8b \
-    --type os.nova.server-1.0
-  +--------------+----------------------------------------------------------------------+
-  | Field        | Value                                                                |
-  +--------------+----------------------------------------------------------------------+
-  | node_preview | +------------+-----------------------------------------------------+ |
-  |              | | property   | value                                               | |
-  |              | +------------+-----------------------------------------------------+ |
-  |              | | properties | {                                                   | |
-  |              | |            |   "name": "test0",                                  | |
-  |              | |            |   "availability_zone": "nova",                      | |
-  |              | |            |   "block_device_mapping_v2": [],                    | |
-  |              | |            |   "image": "6232a7b9-8af1-4dce-8eb5-f2988a0e34bc",  | |
-  |              | |            |   "key_name": "oskey",                              | |
-  |              | |            |   "auto_disk_config": false,                        | |
-  |              | |            |   "flavor": "1",                                    | |
-  |              | |            |   "metadata": {},                                   | |
-  |              | |            |   "networks": [                                     | |
-  |              | |            |     {                                               | |
-  |              | |            |       "network": "private"                          | |
-  |              | |            |     }                                               | |
-  |              | |            |   ],                                                | |
-  |              | |            |   "security_groups": [                              | |
-  |              | |            |     "default",                                      | |
-  |              | |            |     "default"                                       | |
-  |              | |            |   ],                                                | |
-  |              | |            |   "config_drive": false                             | |
-  |              | |            | }                                                   | |
-  |              | | type       | os.nova.server                                      | |
-  |              | | version    | 1.0                                                 | |
-  |              | +------------+-----------------------------------------------------+ |
-  +--------------+----------------------------------------------------------------------+
-
-The option :option:`--overrides ` supports overriding the node profile
-properties. For example, the following command previews the adoption of a
-server as a node, overriding the network properties in the node's profile::
-
-  $ openstack cluster node adopt --preview --identity \
-    1177c8e8-8472-4e9d-8f15-1d4866b85b8b \
-    --type os.nova.server-1.0 \
-    --overrides '{"networks":[{"network": "public"}]}'
-  +--------------+----------------------------------------------------------------------+
-  | Field        | Value                                                                |
-  +--------------+----------------------------------------------------------------------+
-  | node_preview | +------------+-----------------------------------------------------+ |
-  |              | | property   | value                                               | |
-  |              | +------------+-----------------------------------------------------+ |
-  |              | | properties | {                                                   | |
-  |              | |            |   "name": "test0",                                  | |
-  |              | |            |   "availability_zone": "nova",                      | |
-  |              | |            |   "block_device_mapping_v2": [],                    | |
-  |              | |            |   "image": "6232a7b9-8af1-4dce-8eb5-f2988a0e34bc",  | |
-  |              | |            |   "key_name": "oskey",                              | |
-  |              | |            |   "auto_disk_config": false,                        | |
-  |              | |            |   "flavor": "1",                                    | |
-  |              | |            |   "metadata": {},                                   | |
-  |              | |            |   "networks": [                                     | |
-  |              | |            |     {                                               | |
-  |              | |            |       "network": "public"                           | |
-  |              | |            |     }                                               | |
-  |              | |            |   ],                                                | |
-  |              | |            |   "security_groups": [                              | |
-  |              | |            |     "default",                                      | |
-  |              | |            |     "default"                                       | |
-  |              | |            |   ],                                                | |
-  |              | |            |   "config_drive": false                             | |
-  |              | |            | }                                                   | |
-  |              | | type       | os.nova.server                                      | |
-  |              | | version    | 1.0                                                 | |
-  |              | +------------+-----------------------------------------------------+ |
-  +--------------+----------------------------------------------------------------------+
-
-The option :option:`--snapshot ` is a boolean. If set, the Senlin
-engine will create a snapshot of the resource before accepting the resource
-as a node.
-
-
-Deleting a Node
-~~~~~~~~~~~~~~~
-
-A node can be deleted using the :command:`openstack cluster node delete`
-command, for example::
-
-  $ openstack cluster node delete my_node
-
-Note that in this command you can use the name, the ID or the "short ID" to
-specify the node you want to delete. If the specified criteria cannot match
-any nodes, you will get a ``ResourceNotFound`` exception. If more than one
-node matches the criteria, you will get a ``MultipleChoices`` exception.
-
-
-See Also
-~~~~~~~~
-
-Below are links to documents related to node management:
-
-- :doc:`Managing Profile Objects `
-- :doc:`Creating Clusters `
-- :doc:`Managing Cluster Membership `
-- :doc:`Examining Actions `
-- :doc:`Browsing Events `
diff --git a/doc/source/user/policies.rst b/doc/source/user/policies.rst
deleted file mode 100644
index d4217fad9..000000000
--- a/doc/source/user/policies.rst
+++ /dev/null
@@ -1,246 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-policies:
-
-========
-Policies
-========
-
-Concept
-~~~~~~~
-
-A :term:`Policy` is an object instantiated from a :term:`Policy Type`. Once
-created, it can be dynamically attached to or detached from a cluster. Such a
-policy usually contains rules to be checked/enforced when a certain
-:term:`Action` is about to be executed or has been executed.
-
-One policy can be attached to many clusters, and one cluster can have many
-policies attached to it. In addition to this, a policy on a cluster can be
-dynamically enabled or disabled. Please refer to :ref:`ref-bindings` for
-details.
-
-
-Listing Policies
-~~~~~~~~~~~~~~~~
-
-The :program:`openstack cluster` command line provides a sub-command
-:command:`openstack cluster policy list` that can be used to enumerate policy
-objects known to the service. For example::
-
-  $ openstack cluster policy list
-  +----------+------+-----------------------------+---------------------+
-  | id       | name | type                        | created_at          |
-  +----------+------+-----------------------------+---------------------+
-  | 239d7212 | dp01 | senlin.policy.deletion-1.0  | 2015-07-11T04:24:34 |
-  | 7ecfd026 | lb01 | senlin.policy.placement-1.0 | 2015-07-11T04:25:28 |
-  +----------+------+-----------------------------+---------------------+
-
-Note that the first column in the output table is a *short ID* of a policy
-object. The Senlin command line uses short IDs to save real estate on screen
-so that more useful information can be shown on a single line. To show the
-*full ID* in the list, you can add the :option:`--full-id` option to the
-command.
-
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing policies,
-using the option :option:`--sort`. The :option:`--sort` option accepts a
-string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys used
-are policy properties and the dirs can be one of ``asc`` and ``desc``. When
-omitted, Senlin sorts a given key using ``asc`` as the default direction.
-
-For example, the following command sorts the policies using the ``name``
-property in descending order::
-
-  $ openstack cluster policy list --sort name:desc
-
-When sorting the list of policies, you can use one of ``type``, ``name``,
-``created_at`` and ``updated_at``.
-
-
-Paginating the List
--------------------
-
-In case you have a huge collection of policy objects, you can limit the
-number of policies returned from the Senlin server, using the option
-:option:`--limit`. For example::
-
-  $ openstack cluster policy list --limit 1
-  +----------+------+----------------------------+---------------------+
-  | id       | name | type                       | created_at          |
-  +----------+------+----------------------------+---------------------+
-  | 239d7212 | dp01 | senlin.policy.deletion-1.0 | 2015-07-11T04:24:34 |
-  +----------+------+----------------------------+---------------------+
-
-Yet another option you can specify is the ID of a policy object after which
-you want the list to start. In other words, you don't want to see those
-policies with IDs that are or come before the one you specify. You can use
-the option :option:`--marker ` for this purpose. For example::
-
-  $ openstack cluster policy list --limit 1 \
-      --marker 239d7212-6196-4a89-9446-44d28717d7de
-
-Combining the :option:`--marker` option and the :option:`--limit` option
-enables you to do pagination on the results returned from the server.
-
-
-Creating a Policy
-~~~~~~~~~~~~~~~~~
-
-When creating a new policy object, you need a "spec" file in YAML format. You
-may want to check the :command:`openstack cluster policy type show` command in
-:ref:`ref-policy-types` for the property names and types for a specific
-:term:`Policy Type`. For example, the following is a spec for the policy type
-``senlin.policy.deletion`` (the source can be found in the
-:file:`examples/policies/deletion_policy.yaml` file)::
-
-  # Sample deletion policy that can be attached to a cluster.
-  type: senlin.policy.deletion
-  version: 1.0
-  properties:
-    # The valid values include:
-    # OLDEST_FIRST, OLDEST_PROFILE_FIRST, YOUNGEST_FIRST, RANDOM
-    criteria: OLDEST_FIRST
-
-    # Whether the deleted node should be destroyed
-    destroy_after_deletion: True
-
-    # Length in number of seconds before the actual deletion happens
-    # This param buys an instance some time before deletion
-    grace_period: 60
-
-    # Whether the deletion will reduce the desired capacity of
-    # the cluster as well
-    reduce_desired_capacity: False
-
-The properties in this spec file are specific to the
-``senlin.policy.deletion`` policy type.
-To create a policy object using this "spec" file, you can use the following
-command::
-
-  $ cd /opt/stack/senlin/examples/policies
-  $ openstack cluster policy create --spec deletion_policy.yaml dp01
-  +------------+-----------------------------------------------------------+
-  | Field      | Value                                                     |
-  +------------+-----------------------------------------------------------+
-  | created_at | None                                                      |
-  | data       | {}                                                        |
-  | domain_id  | None                                                      |
-  | id         | c2e3cd74-bb69-4286-bf06-05d802c8ec12                      |
-  | location   | None                                                      |
-  | project_id | 42d9e9663331431f97b75e25136307ff                          |
-  | name       | dp01                                                      |
-  | spec       | {                                                         |
-  |            |   "version": 1.0,                                         |
-  |            |   "type": "senlin.policy.deletion",                       |
-  |            |   "description": "A policy for choosing victim node(s).", |
-  |            |   "properties": {                                         |
-  |            |     "destroy_after_deletion": true,                       |
-  |            |     "grace_period": 60,                                   |
-  |            |     "reduce_desired_capacity": false,                     |
-  |            |     "criteria": "OLDEST_FIRST"                            |
-  |            |   }                                                       |
-  |            | }                                                         |
-  | type       | None                                                      |
-  | updated_at | None                                                      |
-  | user_id    | 5e5bf8027826429c96af157f68dc9072                          |
-  +------------+-----------------------------------------------------------+
-
-
-Showing the Details of a Policy
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can use the :command:`openstack cluster policy show` command to show the
-properties of a policy. You need to provide an identifier to the command
-line to indicate the policy object you want to examine. The identifier can be
-the ID, the name or the "short ID" of a policy object. For example::
-
-  $ openstack cluster policy show dp01
-  +------------+-----------------------------------------------------------+
-  | Field      | Value                                                     |
-  +------------+-----------------------------------------------------------+
-  | created_at | 2015-07-11T04:24:34                                       |
-  | data       | {}                                                        |
-  | domain_id  | None                                                      |
-  | id         | c2e3cd74-bb69-4286-bf06-05d802c8ec12                      |
-  | location   | None                                                      |
-  | name       | dp01                                                      |
-  | project_id | 42d9e9663331431f97b75e25136307ff                          |
-  | spec       | {                                                         |
-  |            |   "version": 1.0,                                         |
-  |            |   "type": "senlin.policy.deletion",                       |
-  |            |   "description": "A policy for choosing victim node(s).", |
-  |            |   "properties": {                                         |
-  |            |     "destroy_after_deletion": true,                       |
-  |            |     "grace_period": 60,                                   |
-  |            |     "reduce_desired_capacity": false,                     |
-  |            |     "criteria": "OLDEST_FIRST"                            |
-  |            |   }                                                       |
-  |            | }                                                         |
-  | type       | None                                                      |
-  | updated_at | None                                                      |
-  | user_id    | 5e5bf8027826429c96af157f68dc9072                          |
-  +------------+-----------------------------------------------------------+
-
-You will get an error message if no policy object matches the identifier, or
-if more than one object matches it.
-
-
-Updating a Policy
-~~~~~~~~~~~~~~~~~
-
-After a policy object is created, you may want to change some of its
-properties. You can use the :command:`openstack cluster policy update`
-command to change the "``name``" of a policy. For example, the following
-command renames a policy object from "``dp01``" to "``dp01_bak``"::
-
-  $ openstack cluster policy update --name dp01_bak dp01
-
-If the named policy object cannot be found or the parameter value fails
-validation, you will get an error message.
-
-
-Deleting a Policy
-~~~~~~~~~~~~~~~~~
-
-When there are no clusters referencing a policy object, you can delete it from
-the Senlin database using the following command::
-
-  $ openstack cluster policy delete dp01
-
-Note that in this command you can use the name, the ID or the "short ID" to
-specify the policy object you want to delete. If the specified criteria
-cannot match any policy objects, you will get a ``ResourceNotFound``
-exception.
-If more than one policy matches the criteria, you will get a
-``MultipleChoices`` exception.
-
-See Also
-~~~~~~~~
-
-The list below provides links to documents related to the creation and usage
-of policy objects.
-
-* :doc:`Working with Policy Types `
-* :ref:`Affinity Policy `
-* :ref:`Batch Policy `
-* :ref:`Deletion Policy `
-* :ref:`Health Policy `
-* :ref:`Load-Balancing Policy `
-* :ref:`Region Placement Policy `
-* :ref:`Scaling Policy `
-* :ref:`Zone Placement Policy `
-* :doc:`Managing the Bindings between Clusters and Policies `
-* :doc:`Browsing Events `
diff --git a/doc/source/user/policy_types.rst b/doc/source/user/policy_types.rst
deleted file mode 100644
index 83c6f1f89..000000000
--- a/doc/source/user/policy_types.rst
+++ /dev/null
@@ -1,208 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-policy-types:
-
-============
-Policy Types
-============
-
-Concept
-~~~~~~~
-
-A :term:`Policy Type` is an abstract specification of the rules to be checked
-and/or enforced when an :term:`Action` is performed on a cluster that
-contains nodes of a certain :term:`Profile Type`.
-
-A registry of policy types is built in memory when the Senlin engine
-(:program:`senlin-engine`) is started. In the future, Senlin will allow users
-to provide additional policy type implementations as plug-ins to be loaded
-dynamically.
-
-A policy type implementation dictates which fields are required, which fields
-are optional and sometimes the constraints on field values. When a
-:term:`Policy` is created by referencing this policy type, the fields are
-assigned concrete values. For example, a policy type
-``senlin.policy.deletion`` conceptually specifies the properties required::
-
-  criteria: String # valid values - OLDEST_FIRST, YOUNGEST_FIRST, RANDOM
-  destroy_after_deletion: Boolean
-  grace_period: Integer
-  reduce_desired_capacity: Boolean
-
-The specification of a policy object of this policy type may look like the
-following::
-
-  type: senlin.policy.deletion
-  version: 1.0
-  properties:
-    criteria: OLDEST_FIRST
-    destroy_after_deletion: True
-    grace_period: 120
-    reduce_desired_capacity: True
-
-
-Listing Policy Types
-~~~~~~~~~~~~~~~~~~~~
-
-The Senlin server comes with some built-in policy types.
-You can check the list of policy types using the following command::
-
-  $ openstack cluster policy type list
-  +--------------------------------+---------+----------------------------+
-  | name                           | version | support_status             |
-  +--------------------------------+---------+----------------------------+
-  | senlin.policy.affinity         | 1.0     | SUPPORTED since 2016.10    |
-  | senlin.policy.batch            | 1.0     | EXPERIMENTAL since 2017.02 |
-  | senlin.policy.deletion         | 1.0     | SUPPORTED since 2016.04    |
-  | senlin.policy.deletion         | 1.1     | SUPPORTED since 2018.01    |
-  | senlin.policy.health           | 1.0     | EXPERIMENTAL since 2017.02 |
-  | senlin.policy.loadbalance      | 1.0     | SUPPORTED since 2016.04    |
-  | senlin.policy.loadbalance      | 1.1     | SUPPORTED since 2018.01    |
-  | senlin.policy.region_placement | 1.0     | EXPERIMENTAL since 2016.04 |
-  |                                |         | SUPPORTED since 2016.10    |
-  | senlin.policy.scaling          | 1.0     | SUPPORTED since 2016.04    |
-  | senlin.policy.zone_placement   | 1.0     | EXPERIMENTAL since 2016.04 |
-  |                                |         | SUPPORTED since 2016.10    |
-  +--------------------------------+---------+----------------------------+
-
-The output is a list of policy types supported by the Senlin server.
-
-
-Showing Policy Details
-~~~~~~~~~~~~~~~~~~~~~~
-
-Each :term:`Policy Type` has a schema for its *spec* (i.e. specification)
-that describes the names and types of the properties that can be accepted. To
-show the schema of a specific policy type along with other properties, you can
-use the following command::
-
-  $ openstack cluster policy type show senlin.policy.deletion-1.1
-  support_status:
-    '1.0':
-    - since: '2016.04'
-      status: SUPPORTED
-    '1.1':
-    - since: '2018.01'
-      status: SUPPORTED
-  id: senlin.policy.deletion-1.1
-  location: null
-  name: senlin.policy.deletion-1.1
-  schema:
-    criteria:
-      constraints:
-      - constraint:
-        - OLDEST_FIRST
-        - OLDEST_PROFILE_FIRST
-        - YOUNGEST_FIRST
-        - RANDOM
-        type: AllowedValues
-      default: RANDOM
-      description: Criteria used in selecting candidates for deletion
-      required: false
-      type: String
-      updatable: false
-    destroy_after_deletion:
-      default: true
-      description: Whether a node should be completely destroyed after deletion. Default
-        to True
-      required: false
-      type: Boolean
-      updatable: false
-    grace_period:
-      default: 0
-      description: Number of seconds before real deletion happens.
-      required: false
-      type: Integer
-      updatable: false
-    hooks:
-      default: {}
-      description: Lifecycle hook properties
-      required: false
-      schema:
-        params:
-          default: {}
-          required: false
-          schema:
-            queue:
-              default: ''
-              description: Zaqar queue to receive lifecycle hook message
-              required: false
-              type: String
-              updatable: false
-            url:
-              default: ''
-              description: Url sink to which to send lifecycle hook message
-              required: false
-              type: String
-              updatable: false
-          type: Map
-          updatable: false
-        timeout:
-          default: 0
-          description: Number of seconds before actual deletion happens.
-          required: false
-          type: Integer
-          updatable: false
-        type:
-          constraints:
-          - constraint:
-            - zaqar
-            - webhook
-            type: AllowedValues
-          default: zaqar
-          description: Type of lifecycle hook
-          required: false
-          type: String
-          updatable: false
-      type: Map
-      updatable: false
-    reduce_desired_capacity:
-      default: true
-      description: Whether the desired capacity of the cluster should be reduced along
-        the deletion. Default to True.
-      required: false
-      type: Boolean
-      updatable: false
-
-Here, each property has the following attributes:
-
-- ``default``: the default value for a property when not explicitly specified;
-- ``description``: a textual description of the use of a property;
-- ``required``: whether the property must be specified. Such a property
-  usually doesn't have a ``default`` value;
-- ``type``: one of ``String``, ``Integer``, ``Boolean``, ``Map`` or ``List``;
-- ``updatable``: a boolean indicating whether a property is updatable.
-
-The default output from the :command:`openstack cluster policy type show`
-command is in YAML format. You can choose to show the spec schema in JSON
-format by specifying the :option:`-f json` option as shown below::
-
-  $ openstack cluster policy type show -f json senlin.policy.deletion-1.0
-
-For information on how to manage the relationship between a policy and a
-cluster, please refer to :ref:`ref-bindings`.
-
-
-See Also
-~~~~~~~~
-
-Check the list below for documents related to the creation and usage of
-:term:`Policy` objects.
-
-* :doc:`Creating Your Own Policy Objects `
-* :doc:`Managing the Binding between Cluster and Policy `
-* :doc:`Examining Actions `
-* :doc:`Browsing Events `
diff --git a/doc/source/user/policy_types/affinity.rst b/doc/source/user/policy_types/affinity.rst
deleted file mode 100644
index a860e4ef0..000000000
--- a/doc/source/user/policy_types/affinity.rst
+++ /dev/null
@@ -1,134 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-affinity-policy:
-
-===============
-Affinity Policy
-===============
-
-The affinity policy is designed for Senlin to leverage the *server group* API
-in nova. Using this policy, you can specify whether the nodes in a cluster
-should be collocated on the same physical machine (a.k.a. "affinity") or
-spread onto as many physical machines as possible (a.k.a. "anti-affinity").
-
-Currently, this policy can be used on nova server clusters only. In other
-words, the type name of the cluster's profile has to be ``os.nova.server``.
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.affinity_policy.AffinityPolicy
-
-Sample
-~~~~~~
-
-A typical spec for an affinity policy looks like the following example:
-
-.. literalinclude :: /../../examples/policies/affinity_policy.yaml
-  :language: yaml
-
-The affinity policy has the following properties:
-
-- ``servergroup.name``: An optional string that will be used as the name of
-  the server group to be created.
-- ``servergroup.policies``: A string indicating the policy to be used for
-  the server group.
-- ``availability_zone``: An optional string specifying the availability zone
-  from which the nodes will be launched.
-- ``enable_drs_extension``: A boolean indicating whether the VMware vSphere
-  extension should be enabled.
-
-
-Validation
-~~~~~~~~~~
-
-When creating an affinity policy, the Senlin engine checks if the provided
-spec is valid:
-
-- The value for ``servergroup.policies`` must be one of "``affinity``" or
-  "``anti-affinity``". The default value is "``affinity``" if omitted.
-
-- The value of ``availability_zone`` is the name of an availability zone
-  known to the Nova compute service.
-
-
-Server Group Name
-~~~~~~~~~~~~~~~~~
-
-Since the ``os.nova.server`` profile type may contain ``scheduler_hints``
-which has a server group specified, the affinity policy will behave
-differently based on different settings.
-
-If the profile used by a cluster contains a ``scheduler_hints`` property (as
-shown in the example), the Senlin engine checks if the specified group name
-("``group_135``" in this case) is actually known to the Nova compute service
-as a valid server group. The server group name from the profile spec will
-take precedence over the ``servergroup.name`` value in the policy spec.
-
-.. code-block:: yaml
-
-  type: os.nova.server
-  version: 1.0
-  properties:
-    flavor: m1.small
-    ...
-    scheduler_hints:
-      group: group_135
-
-If the ``group`` value is found to be a valid server group name, the Senlin
-engine will compare whether the policies specified for the nova server group
-match those specified in the affinity policy spec. If the policies don't
-match, the affinity policy cannot be attached to the cluster.
-
-If the profile spec doesn't contain a ``scheduler_hints`` property or the
-``scheduler_hints`` property doesn't have a ``group`` value, the Senlin engine
-will use the ``servergroup.name`` value from the affinity policy spec, if
-provided. If the policy spec also fails to provide a group name, the Senlin
-engine will try to create a server group with a random name, e.g.
-"``server_group_x2mde78a``". The newly created server group will be deleted
-automatically when you detach the affinity policy from the cluster.
-
-
-Availability Zone Name
-~~~~~~~~~~~~~~~~~~~~~~
-
-The spec property ``availability_zone`` is optional, regardless of whether
-``enable_drs_extension`` is specified or what value it is assigned. However,
-if the ``availability_zone`` property does have a value, it will have an
-impact on the placement of newly created nodes. This subsection discusses
-the cases when the DRS extension is not enabled.
-
-If the DRS extension is not enabled and the ``availability_zone`` property
-doesn't have a value, the Senlin engine won't assign an availability zone for
-newly created nodes.
-
-By contrast, if the ``availability_zone`` property does have a value and it
-has been validated to be the name of an availability zone known to Nova, all
-newly created nodes will be created into the specified availability zone.
-
-
-DRS Extension
-~~~~~~~~~~~~~
-
-The property ``enable_drs_extension`` tells the Senlin engine that affinity
-will be enforced by the VMware vSphere extension. In this case, the value of
-the ``availability_zone`` property will be used to search for a suitable
-hypervisor to which new nodes are scheduled.
-
-All newly created nodes in the cluster, when an affinity policy is attached
-and enabled, will be scheduled to an availability zone named
-``<az_name>:<hostname>`` where ``<az_name>`` is the value of
-``availability_zone`` and ``<hostname>`` is the hostname of a selected DRS
-hypervisor.
diff --git a/doc/source/user/policy_types/batch.rst b/doc/source/user/policy_types/batch.rst
deleted file mode 100644
index a7bd5d1d2..000000000
--- a/doc/source/user/policy_types/batch.rst
+++ /dev/null
@@ -1,52 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-batch-policy:
-
-============
-Batch Policy
-============
-
-The batch policy is designed to automatically group a large number of
-operations into smaller batches so that service interruption can be better
-managed and there won't be a flood of service requests sent to any other
-service, which would effectively form a DoS (denial-of-service) attack.
-
-Currently, this policy is applicable to clusters of all profile types and it
-is enforced when a cluster is updated. The development team is still looking
-for an elegant solution that can regulate resource creation requests.
-
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.batch_policy.BatchPolicy
-
-Sample
-~~~~~~
-
-Below is a typical spec for a batch policy:
-
-.. literalinclude :: /../../examples/policies/batch_policy.yaml
-  :language: yaml
-
-The ``min_in_service`` property specifies the minimum number of nodes to be
-kept in ACTIVE status. This is mainly for cluster update use cases. The
-other property, ``max_batch_size``, specifies the number of nodes to be
-updated in each batch. This property is mainly used to ensure that batch
-requests are still within the processing capability of a backend service.
-
-Between each batch of service requests, you can specify an interval in the
-unit of seconds using the ``pause_time`` property. This can be used to ensure
-that updated nodes are fully active to provide services, for example.
diff --git a/doc/source/user/policy_types/deletion.rst b/doc/source/user/policy_types/deletion.rst
deleted file mode 100644
index e9db256ed..000000000
--- a/doc/source/user/policy_types/deletion.rst
+++ /dev/null
@@ -1,183 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-deletion-policy:
-
-===============
-Deletion Policy
-===============
-
-The deletion policy is provided to help users control the election of victim
-nodes when a cluster is about to be shrunk. In other words, when the size of
-a cluster is to be decreased, it determines which node(s) should be removed
-first.
-
-Currently, this policy is applicable to clusters of all profile types and it
-is enforced when the cluster's size is about to be reduced.
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.deletion_policy.DeletionPolicy
-
-Sample
-~~~~~~
-
-Below is a typical spec for a deletion policy:
-
-.. literalinclude :: /../../examples/policies/deletion_policy.yaml
-  :language: yaml
-
-The valid values for the ``criteria`` property include:
-
-- ``OLDEST_FIRST``: always select node(s) which were created earlier than
-  other nodes.
-
-- ``YOUNGEST_FIRST``: always select node(s) which were created more recently,
-  instead of those created earlier.
-
-- ``OLDEST_PROFILE_FIRST``: compare the profile used by each individual node
-  and select the node(s) whose profile(s) were created earlier than others.
-
-- ``RANDOM``: randomly select node(s) from the cluster for deletion. This is
-  the default criterion if omitted.
-
-.. NOTE::
-
-  There is an implicit rule (criterion) when electing victim nodes. The
-  Senlin engine always ranks nodes which are not in ACTIVE state, or which
-  are marked as tainted, before others.
-
-There are several actions that can trigger a deletion policy. Some of them
-may already carry a list of candidates to remove, e.g.
-``CLUSTER_DEL_NODES`` or ``NODE_DELETE``; others may only carry a number of
-nodes to remove, e.g. ``CLUSTER_SCALE_IN`` or ``CLUSTER_RESIZE``. For actions
-that already have a list of candidates, the deletion policy will respect the
-action inputs. The election of victims only happens when no such candidates
-have been identified.
-
-
-Deletion vs Destroy
-~~~~~~~~~~~~~~~~~~~
-
-There are cases where you don't want the node(s) removed from a cluster to be
-destroyed. Instead, you prefer them to become "orphan" nodes so that in the
-future you can quickly add them back to the cluster without having to create
-new nodes.
-
-If this is your situation, you may want to set ``destroy_after_deletion`` to
-``false``. The Senlin engine won't delete the node(s) after removing them
-from the cluster.
-
-The default behavior is to delete (destroy) the node(s) after they are
-deprived of their cluster membership.
-
-
-Grace Period
-~~~~~~~~~~~~
-
-Another common scenario is to grant a node a period of time for it to shut
-down gracefully. Even if a node doesn't have built-in logic to perform a
-graceful shutdown, granting it some extra time may still help ensure the
-resources it was using have been properly released.
-
-The default value for the ``grace_period`` property is 0, which means the
-node deletion happens as soon as the node is removed from the cluster. You
-can customize this value according to your needs. Note that the grace period
-will be granted to all node(s) deleted. When setting this value to a large
-number, be sure it does not exceed the typical timeout value for action
-execution; otherwise the node deletion will fail.
-
-
-Reduce Desired Capacity or Not
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In most cases, users would anticipate the "desired_capacity" of a cluster to
-be reduced when nodes are removed from it. Since the victim selection
-algorithm always picks nodes in non-ACTIVE status over ACTIVE ones, you can
-actually remove erroneous nodes by taking advantage of this rule.
-
-For example, there are 4 nodes in a cluster and 2 of them are known to be in
-inactive status. You can use the command :command:`openstack cluster members
-del` to remove the bad nodes. If you have a deletion policy attached to the
-cluster, you get a chance to tell the Senlin engine that you don't want to
-change the capacity of the cluster. Instead, you only want the bad nodes
-removed. With the help of other cluster health related commands, you can
-quickly recover the cluster to a healthy status. You don't have to change the
-desired capacity of the cluster to a smaller value and then change it back.
-
-If this is your use case, you can set ``reduce_desired_capacity`` to ``false``
-in the policy spec, as shown in the sketch below. The cluster's desired
-capacity won't be changed after the cluster membership is modified.
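-
-As an illustrative sketch only (a hypothetical variation of the sample spec
-shown earlier, using only the properties documented above), such a policy
-might look like::
-
-  type: senlin.policy.deletion
-  version: 1.0
-  properties:
-    criteria: OLDEST_FIRST
-    destroy_after_deletion: true
-    grace_period: 60
-    reduce_desired_capacity: false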
-
-
-Lifecycle Hook
-~~~~~~~~~~~~~~
-
-If there is a need to receive notification of a node deletion, you can
-specify a lifecycle hook in the deletion policy:
-
-.. code-block:: yaml
-
-  type: senlin.policy.deletion
-  version: 1.1
-  properties:
-    hooks:
-      type: 'zaqar'
-      timeout: 120
-      params:
-        queue: 'my_queue'
-
-The valid values for the ``type`` property are:
-
-- ``zaqar``: send a message to a zaqar queue. The name of the zaqar queue
-  must be specified in the ``queue`` property.
-
-- ``webhook``: send a message to a webhook URL. The URL of the webhook must
-  be specified in the ``url`` property.
-
-The ``timeout`` property specifies the number of seconds to wait before the
-actual node deletion happens. This timeout can be preempted by calling the
-complete lifecycle hook API.
-
-.. NOTE::
-
-  Hooks of type ``webhook`` will be supported in a future version. Currently
-  only hooks of type ``zaqar`` are supported.
-
-
-Deleting Nodes Across Regions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-With the help of the :ref:`ref-region-policy`, you will be able to distribute
-a cluster's nodes into different regions as instructed. However, when you are
-removing nodes from more than one region, the same distribution rule has to
-be respected as well.
-
-When there is a region placement policy in effect, the deletion policy will
-first determine the number of nodes to be removed from each region. Then in
-each region, the policy performs a victim election based on the criteria you
-specified in the policy spec.
-
-
-Deleting Nodes Across Availability Zones
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Similarly, when there is a zone placement policy attached to the cluster in
-question, nodes in the cluster may get distributed across a few availability
-zones based on a preset algorithm.
-
-The deletion policy, when triggered, will first determine the number of nodes
-to be removed from each availability zone. Then it proceeds to elect victim
-nodes based on the criteria specified in the policy spec within each
-availability zone.
diff --git a/doc/source/user/policy_types/health.rst b/doc/source/user/policy_types/health.rst
deleted file mode 100644
index e16e743ab..000000000
--- a/doc/source/user/policy_types/health.rst
+++ /dev/null
@@ -1,128 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-health-policy:
-
-=============
-Health Policy
-=============
-
-The health policy is designed for Senlin to detect cluster node failures and
-to recover them in a way customizable by users. The health policy is not
-meant to be a universal solution that can solve all problems related to
-high availability. However, the ultimate goal for the development team is to
-provide an auto-healing framework that is usable, flexible and extensible
-for most deployment scenarios.
-
-The policy type is currently applicable to clusters whose profile type is one
-of ``os.nova.server`` or ``os.heat.stack``. This could be extended in the
-future.
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.health_policy.HealthPolicy
-
-Sample
-~~~~~~
-
-A typical spec for a health policy looks like the following example:
-
-.. literalinclude :: /../../examples/policies/health_policy_poll.yaml
-  :language: yaml
-
-There are two groups of properties (``detection`` and ``recovery``), each of
-which provides information related to the failure detection and the failure
-recovery aspect respectively.
-
-For failure detection, you can specify a detection mode that can be one of
-the following values:
-
-- ``NODE_STATUS_POLLING``: The Senlin engine (more specifically, the health
-  manager service) is expected to poll each and every node periodically to
-  find out whether it is "alive" or not.
-
-- ``NODE_STATUS_POLL_URL``: The Senlin engine (more specifically, the health
-  manager service) is expected to poll the specified URL periodically to
-  find out if a node is considered healthy or not.
-
-- ``LIFECYCLE_EVENTS``: Many services can emit notification messages on the
-  message queue when configured. The Senlin engine is expected to listen to
-  these events and react to them appropriately.
-
-It is possible to combine ``NODE_STATUS_POLLING`` and ``NODE_STATUS_POLL_URL``
-detections by specifying multiple detection modes. In the case of multiple
-detection modes, the Senlin engine tries each detection type in the order
-specified. The behavior of a failed health check in the case of multiple
-detection modes is specified using ``recovery_conditional``.
-
-``LIFECYCLE_EVENTS`` cannot be combined with any other detection type.
-
-All detection types can carry an optional map of ``options``. When the
-detection type is set to "``NODE_STATUS_POLL_URL``", for example, you can
-specify a value for the ``poll_url`` property to specify the URL to be used
-for health checking.
-
-As the policy type implementation stabilizes, more options may be added later.
-
-For failure recovery, there are currently two properties: ``actions`` and
-``fencing``. The ``actions`` property takes a list of action names, each with
-an optional map of parameters specific to that action. For example, the
-``REBOOT`` action can be accompanied by a ``type`` parameter that indicates
-whether the intended reboot operation is a soft reboot or a hard reboot.
-
-.. note::
-
-  The plan for recovery actions is to support a list of actions which can be
-  tried one by one by the Senlin engine. Currently, you can specify only
-  *one* action due to an implementation limitation.
-
-  Another extension to the recovery action is to add triggers to user
-  provided workflows. This is also under development.
-
-
-Validation
-~~~~~~~~~~
-
-Due to an implementation limitation, currently you can only specify *one*
-action for the ``recovery.actions`` property. This constraint will be removed
-soon after the support for action lists is completed.
-
-
-Fencing
-~~~~~~~
-
-Fencing may be an important step during a reliable node recovery process.
-Without fencing, we cannot ensure that the compute, network and/or storage
-resources are in a consistent, predictable status. However, fencing is very
-difficult because it always involves an out-of-band operation to the resource
-controller, for example, an IPMI command to power off a physical host sent to
-a specific IP address.
-
-Currently, the health policy only supports the fencing of virtual machines by
-forcibly deleting them before taking measures to recover them.
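-
-To tie the detection and recovery settings together, here is an illustrative
-sketch only (assuming the property layout of the sample file referenced
-above; exact field names and allowed values may differ across policy
-versions)::
-
-  type: senlin.policy.health
-  version: 1.1
-  properties:
-    detection:
-      interval: 300
-      detection_modes:
-        - type: NODE_STATUS_POLLING
-    recovery:
-      actions:
-        - name: RECREATE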
-
-
-Snapshots
-~~~~~~~~~
-
-There have been some requirements to take snapshots of a node before recovery
-so that the recovered node(s) will resume from where they failed. This feature
-is also on the TODO list for the development team.
-
-
-References
-~~~~~~~~~~
-
-For more detailed information on how the health policy works, please check
-:doc:`Health Policy V1.1 <../../contributor/policies/health_v1>`
\ No newline at end of file
diff --git a/doc/source/user/policy_types/load_balancing.rst b/doc/source/user/policy_types/load_balancing.rst
deleted file mode 100644
index 68bc76a48..000000000
--- a/doc/source/user/policy_types/load_balancing.rst
+++ /dev/null
@@ -1,295 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-lb-policy:
-
-=====================
-Load-Balancing Policy
-=====================
-
-The load-balancing policy is an encapsulation of the LBaaS v2 service that
-distributes the network load evenly among members in a pool. Users are in
-general not interested in the implementation details, although they have a
-strong requirement for the features provided by a load-balancer, such as
-load-balancing, health-monitoring etc.
-
-The load-balancing policy is designed to be applicable to a cluster of virtual
-machines or some variants or extensions of basic virtual machines. Currently,
-Senlin only supports load balancing for Nova servers. Future revisions may
-extend this to more types of clusters.
-
-Before using this policy, you will have to make sure the LBaaS v2 service is
-installed and configured properly.
-
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.lb_policy.LoadBalancingPolicy
-
-Sample
-~~~~~~
-
-The design of the load-balancing policy faithfully follows the interface and
-properties exposed by the LBaaS v2 service. A sample spec is shown below:
-
-.. literalinclude :: /../../examples/policies/lb_policy.yaml
-  :language: yaml
-
-As you can see, there are many properties related to the policy. The good news
-is that for most of them, there are reasonable default values. All properties
-are optional except for the following few:
-
-- ``vip.subnet`` or ``vip.network``: These properties provide the name or ID
-  of the subnet or network on which the virtual IP (VIP) is allocated. At
-  least one (or both) of them must be specified.
-
-The following subsections describe each and every group of properties and the
-general rules on using them.
-
-Note that you can create and configure load-balancers all by yourself when you
-have a good reason to do so. However, by using the load-balancing policy, you
-no longer have to manage the load-balancer's lifecycle manually and you don't
-have to update the load-balancer manually when cluster membership changes.
-
-
-Load Balancer Pools
-~~~~~~~~~~~~~~~~~~~
-
-The load balancer pool is managed automatically when you have a load-balancing
-policy attached to a cluster. The policy automatically adds existing nodes to
-the load balancer pool when attaching the policy.
-Later on, when new nodes are added to the cluster (e.g. by cluster scaling)
-or existing nodes are removed from the cluster, the policy will update the
-pool's status to reflect the change in membership.
-
-Each pool is supposed to use the same protocol and the same port number for
-load sharing. By default, the protocol (i.e. ``pool.protocol``) is set to
-"``HTTP``", which can be customized to "``HTTPS``" or "``TCP``" in your setup.
-The default port number is 80, which also can be modified to suit your service
-configuration.
-
-All nodes in a pool are supposed to reside on the same subnet, and the subnet
-specified in the ``pool.subnet`` property must be compatible with the subnets
-of existing nodes.
-
-The LBaaS service is capable of balancing load among nodes in different ways,
-which are collectively called the ``lb_method``. Valid values for this
-property are:
-
-- ``ROUND_ROBIN``: The load balancer will select a node for workload handling
-  on a round-robin basis. Each node gets an equal share of the pressure to
-  handle workloads.
-
-- ``LEAST_CONNECTIONS``: The load balancer will choose a node based on the
-  number of established connections from clients. The node with the lowest
-  number of connections will be chosen.
-
-- ``SOURCE_IP``: The load balancer will compute hash values based on the IP
-  addresses of the clients and the server and then use the hash value for
-  routing. This ensures the requests from the same client always go to the
-  same server, even in the face of broken connections.
-
-The ``pool.admin_state_up`` property can, most of the time, be safely ignored.
-It is useful only when you want to debug the details of a load-balancer.
-
-The last property that needs some attention is ``pool.session_persistence``,
-which is used to persist client sessions even if the connections may break now
-and then. There are three types of session persistence supported:
-
-- ``SOURCE_IP``: The load balancer will try to resume a broken connection
-  based on the client's IP address. You don't have to configure the
-  ``cookie_name`` property in this case.
-
-- ``HTTP_COOKIE``: The load balancer will check a named, general HTTP cookie
-  using the name specified in the ``cookie_name`` property and then resume the
-  connection based on the cookie contents.
-
-- ``APP_COOKIE``: The load balancer will check the application specific cookie
-  using the name specified in ``cookie_name`` and resume the connection based
-  on the cookie contents.
-
-
-Virtual IP
-~~~~~~~~~~
-
-The Virtual IP (or "VIP" for short) refers to the IP address visible from the
-client side. It is the single IP address used by all clients to access the
-application or service running on the pool nodes. You have to specify a value
-for either the ``vip.subnet`` or the ``vip.network`` property, even if you
-don't have a preference about the actual VIP allocated. However, if you do
-have a preferred VIP address to use, you will need to provide both a
-``vip.subnet``/``vip.network`` and a ``vip.address`` value.
-The LBaaS service will check if both values are valid.
-
-Note that if you choose to omit the ``vip.address`` property, the LBaaS
-service will allocate an address for you from either the provided subnet,
-or a subnet automatically chosen from the provided network. You will
-have to check the cluster's ``data`` property after the load-balancing policy
-has been successfully attached to your cluster. For example:
-
-.. code-block:: console
-
-  $ openstack cluster show my_cluster
-
-  +------------------+------------------------------------------------+
-  | Field            | Value                                          |
-  +------------------+------------------------------------------------+
-  | created_at       | 2017-01-21T06:25:42Z                           |
-  | data             | {                                              |
-  |                  |   "loadbalancers": {                           |
-  |                  |     "1040ad51-87e8-4579-873b-0f420aa0d273": {  |
-  |                  |       "vip_address": "11.22.33.44"             |
-  |                  |     }                                          |
-  |                  |   }                                            |
-  |                  | }                                              |
-  | dependents       | {}                                             |
-  | desired_capacity | 10                                             |
-  | domain_id        | None                                           |
-  | id               | 30d7ef94-114f-4163-9120-412b78ba38bb           |
-  | ...              | ...                                            |
-
-The output above shows you that the cluster has a load-balancer created for
-you and the VIP used to access that cluster is "11.22.33.44".
-
-Similar to the pool properties discussed in the previous subsection, for the
-virtual IP address you can also specify the network protocol and port number
-that clients will use to access it. The default value for ``vip.protocol`` is
-"``HTTP``" and the default port number is 80. Both can be customized to suit
-your needs.
-
-Another useful feature provided by the LBaaS service is the cap on the
-maximum number of connections per second. This is a limit set on a per-VIP
-basis. By default, Senlin sets ``vip.connection_limit`` to -1, which means
-there is no upper bound on the number of connections. You may want to
-customize this value to restrict the number of connection requests per second
-for your service.
-
-The last property in the ``vip`` group is ``admin_state_up``, which defaults
-to "``True``". In some rare cases, you may want to set it to "``False``" for
-the purpose of debugging.
-
-
-Health Monitor
-~~~~~~~~~~~~~~
-
-Since a load-balancer sits in front of all nodes in a pool, it has to be aware
-of the health status of all member nodes so as to properly and reliably route
-client requests to the active nodes for processing. The problem is that there
-are so many different applications or web services, each exhibiting different
-runtime behavior. It is hard to come up with an approach generic and powerful
-enough to detect all kinds of node failures.
-
-The LBaaS that backs the Senlin load-balancing policy supports four types of
-node failure detection, all generic enough to serve a wide range of
-applications.
-
-- ``PING``: The load-balancer pings every pool member to detect if it is
-  still reachable.
-
-- ``TCP``: The load-balancer attempts a telnet connection to the protocol port
-  configured for the pool and thus determines if a node is still alive.
-
-- ``HTTP``: The load-balancer attempts an HTTP request (specified in the
-  ``health_monitor.http_method`` property) to a specific URL (configured in
-  the ``health_monitor.url_path`` property) and then determines if a node is
-  still active by comparing the result code to the expected value (configured
-  in the ``health_monitor.expected_codes`` property).
-
-- ``HTTPS``: The load-balancer checks nodes' aliveness by sending an HTTPS
-  request using the same values as those in the case of ``HTTP``.
-
-The ``health_monitor.expected_codes`` field accepts a string value, but you
-can specify multiple HTTP status codes that can be treated as an indicator of
-a node's aliveness:
-
-- A single value, such as ``200``;
-
-- A list of values separated by commas, such as ``200, 202``;
-
-- A range of values, such as ``200-204``.
-
-To make the failure detection reliable, you may want to check and customize
-the following properties in the ``health_monitor`` group.
-
-- ``timeout``: The maximum time in milliseconds that a monitor waits for a
-  response from a node before it claims the node unreachable. The default is
-  5.
-
-- ``max_retries``: The number of allowed connection failures before the
-  monitor concludes that the node is inactive. The default is 3.
-
-- ``delay``: The time in milliseconds between sending two consecutive
-  requests (probes) to pool members. The default is 10.
-
-Careful experimentation is usually warranted to come up with reasonable
-values for these fields in a specific environment.
-
-
-LB Status Timeout
-~~~~~~~~~~~~~~~~~
-
-Due to the way the LBaaS service is implemented, creating load balancers and
-health monitors, and updating load balancer pools, all take considerable
-time. In some deployment scenarios, it may take the load balancer several
-minutes to become operative again after an update operation.
-
-The ``lb_status_timeout`` option is provided since version 1.1 of the
-load-balancing policy to mitigate this effect. In a real production
-environment, you are expected to set this value based on some careful
-dry-runs.
-
-
-Availability Zone
-~~~~~~~~~~~~~~~~~
-
-Load balancers have their own availability zones, similar to the compute
-service.
-
-The ``availability_zone`` option is provided since version 1.2 of the
-load-balancing policy, to allow the user to choose which availability zone to
-use when provisioning the load balancer.
-
-Validation
-~~~~~~~~~~
-
-When creating a new load-balancing policy object, Senlin checks if the subnet
-and/or network provided are actually known to the Neutron network service. If
-they are not, the policy creation will fail.
-
-
-Updates to the Cluster and Nodes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When a load-balancing policy has been successfully attached to a cluster, you
-can observe the VIP address from the ``data`` property of the cluster, as
-described above.
-
-You can also check the ``data`` property of nodes in the cluster. Each node
-will have a ``lb_member`` key in its data property indicating the ID of the
-said node in the load-balancer pool.
-
-When the load-balancing policy is detached from a cluster successfully, these
-data will be automatically removed, and the related resources created on the
-LBaaS side are deleted transparently.
-
-
-Node Deletion
-~~~~~~~~~~~~~
-
-In the case where there is a :ref:`ref-deletion-policy` attached to the same
-cluster, the deletion policy will elect the victims to be removed from a
-cluster before the load-balancing policy gets a chance to remove those nodes
-from the load-balancing pool.
-
-However, when there is no such deletion policy in place, the load-balancing
-policy will try to figure out the number of nodes to delete (if needed) and
-randomly choose the victim nodes for deletion.
diff --git a/doc/source/user/policy_types/region_placement.rst b/doc/source/user/policy_types/region_placement.rst
deleted file mode 100644
index 9b3095e6a..000000000
--- a/doc/source/user/policy_types/region_placement.rst
+++ /dev/null
@@ -1,92 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-region-policy:
-
-=======================
-Region Placement Policy
-=======================
-
-The region placement policy is designed to enable the deployment and
-management of resource pools across multiple regions. Note that the current
-design is only concerned with a single keystone endpoint for multiple
-regions; interaction with keystone federation is planned as a future
-extension.
-
-The policy is designed to work with clusters of any profile type.
-
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.region_placement.RegionPlacementPolicy
-
-Sample
-~~~~~~
-
-A typical spec for a region placement policy is shown in the following sample:
-
-.. literalinclude :: /../../examples/policies/placement_region.yaml
-  :language: yaml
-
-In this sample spec, two regions are provided, namely "``region_1``" and
-"``region_2``". There are "weight" and "cap" attributes associated with them,
-both of which are optional.
-
-The "``weight``" value is to be interpreted as a relative number. The value
-assigned to one region has to be compared to those assigned to other regions
-for an assessment. In the sample shown above, ``region_1`` and ``region_2``
-are assigned weights of 100 and 200 respectively. This means that out of
-every 3 node creations, one is expected to be scheduled to ``region_1`` and
-the other 2 are expected to be scheduled to ``region_2``. Put another way,
-the chance of ``region_2`` receiving a node creation request is twice that
-of ``region_1``.
-
-The "``weight``" value has to be a positive integer, if specified. The default
-value is 100 for all regions whose weight is omitted.
-
-There are cases where each region has a different amount of resources
-provisioned, so their capacities for creating and running nodes differ. To
-deal with these situations, you can assign a "``cap``" value to such a
-region. This effectively tells the Senlin engine that the region is not
-supposed to accommodate more nodes than the specified number.
-
-
-Validation
-~~~~~~~~~~
-
-When creating a region placement policy, the Senlin engine validates whether
-the region names given are all known to be available regions by the keystone
-identity service. Do NOT pass in an invalid region name and hope Senlin can
-create a region for you.
-
-Later on, when the policy is triggered by node creation or deletion, it
-always validates if the provided regions are still valid and usable.
-
-
-Node Distribution
-~~~~~~~~~~~~~~~~~
-
-After a region placement policy is attached to a cluster and enabled, all
-future node creations (by cluster scaling, for example) will trigger an
-evaluation of the policy.
-
-The region placement policy will favor regions with the highest weight values
-when selecting a region for nodes to be created. It will guarantee that no
-more than the provided ``cap`` number of nodes will be allocated to a
-specific region.
-
-Node distribution is calculated not only when new nodes are created and added
-to a cluster, but also when existing nodes are to be removed from the
-cluster. The policy will strive to maintain a distribution close to the one
-computed from the weight distribution of all regions.
diff --git a/doc/source/user/policy_types/scaling.rst b/doc/source/user/policy_types/scaling.rst
deleted file mode 100644
index 0cbf8b6ce..000000000
--- a/doc/source/user/policy_types/scaling.rst
+++ /dev/null
@@ -1,158 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _ref-scaling-policy:
-
-==============
-Scaling Policy
-==============
-
-The scaling policy is designed to supplement a cluster scaling request with
-more detailed arguments based on user-provided rules. This policy type is
-expected to be applicable on clusters of all profile types.
-
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-   :package: senlin.policies.scaling_policy.ScalingPolicy
-
-Sample
-~~~~~~
-
-A typical spec for a scaling policy is shown below:
-
-.. literalinclude :: /../../examples/policies/scaling_policy.yaml
-   :language: yaml
-
-You should pay special attention to the ``event`` property, whose valid
-values include "``CLUSTER_SCALE_IN``" and "``CLUSTER_SCALE_OUT``". One
-implication of this design is that you have to attach two policies to the
-same cluster if you want to control the scaling behavior both when you are
-expanding the cluster and when you are shrinking it. You cannot control the
-scaling behavior in both directions using the same policy.
-
-The builtin policy types were carefully designed so that you can attach more
-than one instance of the scaling policy type to the same cluster, whereas
-attempting to attach two policies of another type (say
-``senlin.policy.deletion``) to the same cluster results in an error.
-
-The value of the ``event`` property indicates when the policy will be checked.
-A policy with ``event`` set to "``CLUSTER_SCALE_IN``" will be checked when and
-only when a corresponding action is triggered on the cluster; the same holds
-for a policy with ``event`` set to "``CLUSTER_SCALE_OUT``". If the cluster is
-currently processing a scaling action, it will not accept another scaling
-action until the current action has been processed and the cooldown has been
-observed.
-
-For both types of actions that can trigger the scaling policy, there are
-always three types of adjustments to choose from as listed below. The type
-of adjustment determines the interpretation of the ``adjustment.number`` value.
-
-- ``EXACT_CAPACITY``: the value specified for ``adjustment.number`` means the
-  new capacity of the cluster, so it has to be a non-negative integer.
-
-- ``CHANGE_IN_CAPACITY``: the value specified for ``adjustment.number`` is the
-  number of nodes to be added or removed. This means the value has to be a
-  non-negative number as well.
-
-- ``CHANGE_IN_PERCENTAGE``: the value specified for ``adjustment.number`` will
-  be interpreted as the percentage of capacity change. This value has to be
-  a non-negative floating-point value.
-
-For example, in the sample spec shown above, when a ``CLUSTER_SCALE_IN``
-request is received, the policy will remove 10% of the total number of nodes
-from the cluster.
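-
-In case the sample file is not at hand, the following hand-written sketch
-shows the shape of such a spec; the values are illustrative and mirror the
-10% scale-in behavior described above:
-
-.. code-block:: yaml
-
-   type: senlin.policy.scaling
-   version: 1.0
-   properties:
-     event: CLUSTER_SCALE_IN
-     adjustment:
-       type: CHANGE_IN_PERCENTAGE
-       number: 10
-       min_step: 1
-       best_effort: True
-       cooldown: 120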
-
-
-Dealing With Percentage
-~~~~~~~~~~~~~~~~~~~~~~~
-
-As stated above, when ``adjustment.type`` is set to ``CHANGE_IN_PERCENTAGE``,
-the value of ``adjustment.number`` can be a floating-point value, interpreted
-as a percentage of the current node count of the cluster.
-
-In many cases, the result of the calculation may be a floating-point value.
-For example, if the current capacity of a cluster is 5 and the
-``adjustment.number`` is set to 30%, the computed result will be 1.5. In this
-situation, the scaling policy rounds the number up to its adjacent integer,
-i.e. 2. If the ``event`` property has "``CLUSTER_SCALE_OUT``" as its value,
-the policy decision is to add 2 nodes to the cluster. If on the other hand the
-``event`` is set to "``CLUSTER_SCALE_IN``", the policy decision is to remove
-2 nodes from the cluster.
-
-There are other corner cases to consider as well. When the computed result is
-less than 0.1, for example, it becomes a question whether the Senlin engine
-should add (or remove) nodes at all. The property ``adjustment.min_step`` is
-designed to make this decision. After the policy has computed the result, it
-will check whether the result is less than the specified
-``adjustment.min_step`` value, and it will use the ``adjustment.min_step``
-value instead if so.
-
-
-Best Effort Scaling
-~~~~~~~~~~~~~~~~~~~
-
-In many auto-scaling usage scenarios, the policy decision may break the size
-constraints set on the cluster. As an example, a cluster has its ``min_size``
-set to 5, ``max_size`` set to 10 and its current capacity is 7. If the policy
-decision is to remove 3 nodes from the cluster, we are in a dilemma. Removing
-3 nodes will change the cluster capacity to 4, which is not allowed by the
-cluster. If we don't remove 3 nodes, we are not respecting the policy
-decision.
-
-The ``adjustment.best_effort`` property is designed to mitigate this situation.
-When it is set to False, the scaling policy will strictly conform to the rules
-set. It will reject the scaling request if the computed cluster capacity will
-break its size constraints. However, if ``adjustment.best_effort`` is set to
-True, the scaling policy will strive to compute a sub-optimal number which
-will not break the cluster's size constraints. In the above example, this
-means the policy decision will be "remove 2 nodes from the cluster". In other
-words, the policy will at least try to partially fulfill the scaling goal for
-the sake of respecting the size constraints.
-
-
-Cooldown
-~~~~~~~~
-
-In real-life cluster deployments, workload pressure fluctuates rapidly. During
-this minute, it may seem that 10 more nodes are needed to handle the bursting
-workload. During the next minute, this may turn out to be a false alarm as
-the workload rapidly decreases. Since it is very difficult, if possible at
-all, to accurately predict workload changes, an auto-scaling engine is not
-supposed to react prematurely to workload fluctuations.
-
-The ``cooldown`` property gives you a chance to specify an interval during
-which the cluster will ignore further scaling requests. Setting a large
-value for this property will lead to a stable cluster, but the responsiveness
-to urgent situations is sacrificed. Setting a small value, on the contrary,
-can meet the responsiveness requirement, but may also drive the cluster into
-a thrashing state where new nodes are created very frequently only to be
-removed shortly afterwards.
-
-There is never a recommended value that suits all deployments. You will have
-to try different values in your own environment and tune them for different
-applications or services.
-
-
-Interaction with Other Policies
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The scaling policy is only tasked to decide the number of nodes to add or
-remove. For newly added nodes, you will use other policies to determine where
-they should be scheduled. For nodes to be deleted, you will use other policies
-(e.g. the deletion policy) to elect the victim nodes.
-
-The builtin policies were designed carefully so that they can work well
-together or by themselves.
-
diff --git a/doc/source/user/policy_types/zone_placement.rst b/doc/source/user/policy_types/zone_placement.rst
deleted file mode 100644
index e80230f0d..000000000
--- a/doc/source/user/policy_types/zone_placement.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _ref-zone-policy:
-
-=====================
-Zone Placement Policy
-=====================
-
-The zone placement policy is designed to enable the deployment and management
-of resource pools across multiple availability zones. Note that the current
-design is only concerned with the availability zones configured for the Nova
-compute service. Support for Cinder availability zones and Neutron
-availability zones may be added in the future when there are volume-storage
-specific or network specific profile types.
-
-The current implementation of the zone placement policy works with clusters of
-Nova virtual machines only.
-
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-   :package: senlin.policies.zone_placement.ZonePlacementPolicy
-
-Sample
-~~~~~~
-
-A typical spec for a zone placement policy is exemplified in the following
-sample:
-
-.. literalinclude :: /../../examples/policies/placement_zone.yaml
-   :language: yaml
-
-In this sample spec, two availability zones are provided, namely "``az_1``" and
-"``az_2``". Each availability zone can have an optional "``weight``" attribute
-associated with it.
-
-The "``weight``" value is to be interpreted as a relative number. The value
-assigned to one zone has to be compared to those assigned to other zones for
-an assessment. In the sample shown above, ``az_1`` and ``az_2`` are assigned
-weights of 100 and 200 respectively. This means that for every 3 nodes
-created, one is expected to be scheduled to ``az_1`` and the other 2 are
-expected to be scheduled to ``az_2``. In other words, the chance of ``az_2``
-receiving a node creation request is twice that of ``az_1``.
-
-The "``weight``" value has to be a positive integer, if specified. The default
-value is 100 for all zones whose weight is omitted.
-
-
-Validation
-~~~~~~~~~~
-
-When creating a zone placement policy, the Senlin engine validates whether
-the zone names given are all availability zones known to and usable by the
-Nova compute service. Do NOT pass in an invalid availability zone name and
-hope Senlin can create a zone for you.
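-
-As an illustration, a hand-written sketch matching the sample discussed above
-is shown below (the zone names are illustrative); creating a policy like this
-will fail if the named zones are unknown to Nova:
-
-.. code-block:: yaml
-
-   type: senlin.policy.zone_placement
-   version: 1.0
-   properties:
-     zones:
-       - name: az_1
-         weight: 100
-       - name: az_2
-         weight: 200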
-
-Later on when the zone placement policy is triggered upon node creation or
-node deletion actions, it always validates if the provided availability zones
-are still valid and usable.
-
-
-Node Distribution
-~~~~~~~~~~~~~~~~~
-
-After a zone placement policy is attached to a cluster and enabled, all future
-node creations (by cluster scaling for example) will trigger an evaluation of
-the policy. Similarly, a node deletion action will also trigger an evaluation
-of it because the policy's goal is to maintain the node distribution based on
-the one computed from the weight distribution of all zones.
-
-The zone placement policy will favor availability zones with the highest
-weight values when selecting a zone for nodes to be created.
diff --git a/doc/source/user/profile_types.rst b/doc/source/user/profile_types.rst
deleted file mode 100644
index 78fe510c7..000000000
--- a/doc/source/user/profile_types.rst
+++ /dev/null
@@ -1,225 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _ref-profile-types:
-
-=============
-Profile Types
-=============
-
-Concept
-~~~~~~~
-
-A :term:`Profile Type` can be treated as the meta-type of a :term:`Profile`
-object. A registry of profile types is built in memory when the Senlin engine
-(:program:`senlin-engine`) is started. In the future, Senlin will allow users
-to provide additional profile type implementations as plug-ins to be loaded
-dynamically.
-
-A profile type implementation dictates which fields are required. When a
-profile is created by referencing this profile type, the fields are assigned
-with concrete values. For example, a profile type can be ``os.heat.stack``
-that conceptually specifies the properties required:
-
-::
-
-  context: Map
-  template: Map
-  parameters: Map
-  files: Map
-  timeout: Integer
-  disable_rollback: Boolean
-  environment: Map
-
-A profile of type ``os.heat.stack`` may look like:
-
-::
-
-  # a spec for os.heat.stack
-  type: os.heat.stack
-  version: 1.0
-  properties:
-    context:
-      region_name: RegionOne
-    template:
-      heat_template_version: 2014-10-16
-      parameters:
-        length: Integer
-      resources:
-        rand:
-          type: OS::Heat::RandomString
-          properties:
-            len: {get_param: length}
-      outputs:
-        rand_val:
-          value: {get_attr: [rand, value]}
-    parameters:
-      length: 32
-    files: {}
-    timeout: 60
-    disable_rollback: True
-    environment: {}
-
-
-Listing Profile Types
-~~~~~~~~~~~~~~~~~~~~~
-
-The Senlin server comes with some built-in profile types. You can check the
-list of profile types using the following command::
-
-  $ openstack cluster profile type list
-  +----------------------------+---------+----------------------------+
-  | name                       | version | support_status             |
-  +----------------------------+---------+----------------------------+
-  | container.dockerinc.docker | 1.0     | EXPERIMENTAL since 2017.02 |
-  | os.heat.stack              | 1.0     | SUPPORTED since 2016.04    |
-  | os.nova.server             | 1.0     | SUPPORTED since 2016.04    |
-  +----------------------------+---------+----------------------------+
-
-The output is a list of profile types supported by the Senlin server.
- - -Showing Profile Details -~~~~~~~~~~~~~~~~~~~~~~~ - -Each :term:`Profile Type` has a schema for its *spec* (i.e. specification) -that describes the names and the types of properties that can be accepted. To -show the schema of a specific profile type along with other properties, you -can use the following command:: - - $ openstack cluster profile type show os.heat.stack-1.0 - support_status: - '1.0': - - since: '2016.04' - status: SUPPORTED - id: os.heat.stack-1.0 - location: null - name: os.heat.stack - schema: - context: - default: {} - description: A dictionary for specifying the customized context for - stack operations - required: false - type: Map - updatable: false - disable_rollback: - default: true - description: A boolean specifying whether a stack operation can be - rolled back. - required: false - type: Boolean - updatable: true - <... omitted ...> - timeout: - description: A integer that specifies the number of minutes that a - stack operation times out. - required: false - type: Integer - updatable: true - -Here, each property has the following attributes: - -- ``default``: the default value for a property when not explicitly specified; -- ``description``: a textual description of the use of a property; -- ``required``: whether the property must be specified. Such kind of a - property usually doesn't have a ``default`` value; -- ``type``: one of ``String``, ``Integer``, ``Boolean``, ``Map`` or ``List``; -- ``updatable``: a boolean indicating whether a property is updatable. - -The default output from the :command:`openstack cluster profile type show` -command is in YAML format. You can choose to show the spec schema in JSON -format by specifying the :option:`-f json` option as exemplified below:: - - $ openstack cluster profile type show -f json os.heat.stack-1.0 - { - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.04" - } - ] - }, - "name": "os.heat.stack", - "schema": { - "files": { - "default": {}, - "required": false, - "type": "Map", - "description": "Contents of files referenced by the template, if any.", - "updatable": true - }, - <... omitted ...> - "context": { - "default": {}, - "required": false, - "type": "Map", - "description": "A dictionary for specifying the customized context for stack operations", - "updatable": false - } - }, - "id": "os.heat.stack-1.0", - "location": null - } - - -Showing Profile Type Operations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Each :term:`Profile Type` has built-in operations, you can get the operations -of a profile type using the following command:: - - $ openstack cluster profile type ops os.heat.stack-1.0 - operations: - abandon: - description: Abandon a heat stack node. - required: false - type: Map - updatable: false - -Here, each property has the following attributes: - -- ``description``: a textual description of the use of a property; -- ``required``: whether the property must be specified. Such kind of a - property usually doesn't have a ``default`` value; -- ``type``: one of ``String``, ``Integer``, ``Boolean``, ``Map`` or ``List``; -- ``updatable``: a boolean indicating whether a property is updatable. - -The default output from the :command:`openstack cluster profile type ops` -command is in YAML format. 
You can choose to show the spec schema in JSON
-format by specifying the :option:`-f json` option as exemplified below::
-
-  $ openstack cluster profile type ops -f json os.heat.stack-1.0
-  {
-    "operations": {
-      "abandon": {
-        "required": false,
-        "type": "Map",
-        "description": "Abandon a heat stack node.",
-        "updatable": false
-      }
-    }
-  }
-
-
-See Also
-~~~~~~~~
-
-Below is a list of links to the documents related to profile types:
-
-* :doc:`Managing Profile Objects <profiles>`
-* :doc:`Creating and Managing Clusters <clusters>`
-* :doc:`Creating and Managing Nodes <nodes>`
-* :doc:`Managing Cluster Membership <membership>`
-* :doc:`Browsing Events <events>`
diff --git a/doc/source/user/profile_types/docker.rst b/doc/source/user/profile_types/docker.rst
deleted file mode 100644
index dc0ba2b05..000000000
--- a/doc/source/user/profile_types/docker.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _ref-docker-profile:
-
-==============
-Docker Profile
-==============
-
-The docker profile instantiates nodes that are associated with docker
-container instances.
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-   :package: senlin.profiles.container.docker.DockerProfile
-
-Sample
-~~~~~~
-
-Below is a typical spec for a docker profile:
-
-.. literalinclude :: /../../examples/profiles/docker_container/docker_basic.yaml
-   :language: yaml
\ No newline at end of file
diff --git a/doc/source/user/profile_types/nova.rst b/doc/source/user/profile_types/nova.rst
deleted file mode 100644
index e9c6b5f78..000000000
--- a/doc/source/user/profile_types/nova.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _ref-nova-profile:
-
-============
-Nova Profile
-============
-
-The nova profile instantiates nodes that are associated with nova server
-instances.
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-   :package: senlin.profiles.os.nova.server.ServerProfile
-
-Sample
-~~~~~~
-
-Below is a typical spec for a nova profile:
-
-.. literalinclude :: /../../examples/profiles/nova_server/cirros_basic.yaml
-   :language: yaml
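-
-Since the sample file itself is not reproduced here, a hand-written sketch of
-such a spec is shown below; the values are borrowed from the profile examples
-later in this guide and are illustrative only:
-
-.. code-block:: yaml
-
-   type: os.nova.server
-   version: 1.0
-   properties:
-     name: cirros_server
-     flavor: 1
-     image: cirros-0.3.2-x86_64-uec
-     key_name: oskey
-     networks:
-       - network: private
\ No newline at end of file
diff --git a/doc/source/user/profile_types/stack.rst b/doc/source/user/profile_types/stack.rst
deleted file mode 100644
index 2fd1b721c..000000000
--- a/doc/source/user/profile_types/stack.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain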
-   a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _ref-stack-profile:
-
-=============
-Stack Profile
-=============
-
-The stack profile instantiates nodes that are associated with heat stack
-instances.
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-   :package: senlin.profiles.os.heat.stack.StackProfile
-
-Sample
-~~~~~~
-
-Below is a typical spec for a stack profile:
-
-.. literalinclude :: /../../examples/profiles/heat_stack/nova_server/heat_stack_nova_server.yaml
-   :language: yaml
diff --git a/doc/source/user/profiles.rst b/doc/source/user/profiles.rst
deleted file mode 100644
index 087bd7546..000000000
--- a/doc/source/user/profiles.rst
+++ /dev/null
@@ -1,426 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-
-.. _ref-profiles:
-
-========
-Profiles
-========
-
-Concept
-~~~~~~~
-
-A :term:`Profile` is the mould used for creating a :term:`Node` to be managed
-by the Senlin service. It can be treated as an instance of a
-:term:`Profile Type` with a unique ID. A profile encodes the information
-needed for node creation in a property named ``spec``.
-
-The primary job of a profile type implementation is to translate the
-user-provided JSON data structure into information that can be consumed by a
-driver. A driver will create/delete/update a physical object based on the
-information provided.
-
-
-Listing Profiles
-~~~~~~~~~~~~~~~~
-
-To examine the list of profile objects known to the Senlin engine, you can
-use the following command::
-
-  $ openstack cluster profile list
-  +----------+----------+--------------------+---------------------+
-  | id       | name     | type               | created_at          |
-  +----------+----------+--------------------+---------------------+
-  | 560a8f9d | myserver | os.nova.server-1.0 | 2015-05-05T13:26:00 |
-  | ceda64bd | mystack  | os.heat.stack-1.0  | 2015-05-05T13:26:25 |
-  | 9b127538 | pstack   | os.heat.stack-1.0  | 2015-06-25T12:59:01 |
-  +----------+----------+--------------------+---------------------+
-
-Note that the first column in the output table is a *short ID* of a profile
-object. The Senlin command line uses short IDs to save real estate on screen
-so that more useful information can be shown on a single line. To show the
-*full ID* in the list, you can add the :option:`--full-id` option to the
-command::
-
-  $ openstack cluster profile list --full-id
-  +-------------------+----------+--------------------+---------------------+
-  | id                | name     | type               | created_at          |
-  +-------------------+----------+--------------------+---------------------+
-  | 560a8f9d-7596-... | myserver | os.nova.server-1.0 | 2015-05-05T13:26:00 |
-  | ceda64bd-70b7-... | mystack  | os.heat.stack-1.0  | 2015-05-05T13:26:25 |
-  | 9b127538-a675-... | pstack   | os.heat.stack-1.0  | 2015-06-25T12:59:01 |
-  +-------------------+----------+--------------------+---------------------+
-
-The ``id`` column above contains the full UUID of profiles.
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing profiles,
-using the option :option:`--sort`. The :option:`--sort` option accepts a
-string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys used
-are profile properties and the dirs can be one of ``asc`` and ``desc``. When
-omitted, Senlin sorts a given key using ``asc`` as the default direction.
-
-For example, the following command sorts the profiles using the ``name``
-property in descending order::
-
-  $ openstack cluster profile list --sort name:desc
-
-When sorting the list of profiles, you can use one of ``type``, ``name``,
-``created_at`` and ``updated_at``.
-
-
-Filtering the List
-------------------
-
-The :program:`openstack cluster profile list` command also provides options
-for filtering the profile list at the server side. The option
-:option:`--filters` can be used for this purpose. For example, the following
-command filters the profiles by the ``type`` field::

-  $ openstack cluster profile list --filter "type=os.heat.stack-1.0"
-  +----------+----------+--------------------+---------------------+
-  | id       | name     | type               | created_at          |
-  +----------+----------+--------------------+---------------------+
-  | ceda64bd | mystack  | os.heat.stack-1.0  | 2015-05-05T13:26:25 |
-  | 9b127538 | pstack   | os.heat.stack-1.0  | 2015-06-25T12:59:01 |
-  +----------+----------+--------------------+---------------------+
-
-The option :option:`--filters` accepts a list of key-value pairs separated
-by semicolon (``;``), where each key-value pair is expected to be of format
-``<key>=<value>``. The valid keys for filtering include: ``name`` and
-``type``.
-
-
-Paginating the List
--------------------
-
-In case you have a huge collection of profile objects, you can limit the
-number of profiles returned from the Senlin server, using the option
-:option:`--limit`. For example::
-
-  $ openstack cluster profile list --limit 1
-  +----------+----------+--------------------+---------------------+
-  | id       | name     | type               | created_at          |
-  +----------+----------+--------------------+---------------------+
-  | 560a8f9d | myserver | os.nova.server-1.0 | 2015-05-05T13:26:00 |
-  +----------+----------+--------------------+---------------------+
-
-Yet another option you can specify is the ID of a profile object after which
-you want the returned list to start. In other words, you don't want to see
-those profiles whose IDs are or come before the one you specify. You can use
-the option :option:`--marker` for this purpose. For example::
-
-  $ openstack cluster profile list --limit 1 \
-      --marker ceda64bd-70b7-4711-9526-77d5d51241c5
-  +----------+--------+-------------------+---------------------+
-  | id       | name   | type              | created_at          |
-  +----------+--------+-------------------+---------------------+
-  | 9b127538 | pstack | os.heat.stack-1.0 | 2015-06-25T12:59:01 |
-  +----------+--------+-------------------+---------------------+
-
-
-Creating a Profile
-~~~~~~~~~~~~~~~~~~
-
-Before working with a :term:`Cluster` or a :term:`Node`, you will need a
-:term:`Profile` object created with a profile type. To create a profile, you
-will need a "spec" file in YAML format.
For example, below is a simple spec -for the ``os.heat.stack`` profile type (the source can be found in the -:file:`/examples/profiles/heat_stack/random_string/ -heat_stack_random_string.yaml` file). - -:: - - type: os.heat.stack - version: 1.0 - properties: - name: random_string_stack - template: random_string_stack.yaml - context: - region_name: RegionOne - -The ``random_string_stack.yaml`` is the name of a Heat template file to be used -for stack creation. It is given here only as an example. You can -decide which properties to use based on your requirements. - -Now you can create a profile using the following command:: - - $ cd /opt/stack/senlin/examples/profiles/heat_stack/random_string - $ openstack cluster profile create \ - --spec heat_stack_random_string.yaml \ - my_stack - +------------+-------------------------------------------------------------+ - | Field | Value | - +------------+-------------------------------------------------------------+ - | created_at | 2015-07-01T03:13:23 | - | domain_id | None | - | id | c0389712-9c1a-4c58-8ba7-caa61b34b8b0 | - | location | None | - | metadata | {} | - | name | my_stack | - | project_id | 333acb15a43242f4a609a27cb097a8f2 | - | spec | +------------+--------------------------------------------+ | - | | | property | value | | - | | +------------+--------------------------------------------+ | - | | | version | 1.0 | | - | | | type | "os.heat.stack" | | - | | | properties | { | | - | | | | "files": { | | - | | | | "file:///...": "" | | - | | | | }, | | - | | | | "disable_rollback": true, | | - | | | | "template": { | | - | | | | "outputs": { | | - | | | | "result": { | | - | | | | "value": { | | - | | | | "get_attr": [ | | - | | | | "random", | | - | | | | "value" | | - | | | | ] | | - | | | | } | | - | | | | } | | - | | | | }, | | - | | | | "heat_template_version": "2014-10-16", | | - | | | | "resources": { | | - | | | | "random": { | | - | | | | "type": "OS::Heat::RandomString", | | - | | | | "properties": { | | - | | | | "length": 64 | | - | | | | } | | - | | | | } | | - | | | | }, | | - | | | | "parameters": { | | - | | | | "file": { | | - | | | | "default": { | | - | | | | "get_file": "file:///..." | | - | | | | }, | | - | | | | "type": "string" | | - | | | | } | | - | | | | } | | - | | | | }, | | - | | | | "parameters": {}, | | - | | | | "timeout": 60, | | - | | | | "environment": { | | - | | | | "resource_registry": { | | - | | | | "os.heat.server": "OS::Heat::Server" | | - | | | | } | | - | | | | }, | | - | | | | "context": { | | - | | | | "region_name": "RegionOne" | | - | | | | } | | - | | | | } | | - | | +------------+--------------------------------------------+ | - | type | os.heat.stack-1.0 | - | updated_at | None | - | user_id | 5e5bf8027826429c96af157f68dc9072 | - +------------+-------------------------------------------------------------+ - -From the outputs, you can see that the profile is created with a new ``id`` -generated. The ``spec`` property is dumped for the purpose of verification. - -Optionally, you can attach some key-value pairs to the new profile when -creating it. 
This data is referred to as the *metadata* for the profile:: - - $ openstack cluster profile create \ - --spec heat_stack_random_string.yaml \ - --metadata "author=Tom;version=1.0" \ - my_stack - - $ openstack cluster profile create \ - --spec heat_stack_random_string.yaml \ - --metadata author=Tom --metadata version=1.0 \ - my_stack - - -Showing the Details of a Profile -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Once there are profile objects in Senlin database, you can use the following -command to show the properties of a profile:: - - $ openstack cluster profile show myserver - +------------+---------------------------------------------------------+ - | Field | Value | - +------------+---------------------------------------------------------+ - | created_at | 2015-07-01T03:18:58 | - | domain_id | None | - | id | 70a36cc7-9fc7-460e-98f6-d44e3302e604 | - | location | None | - | metadata | {} | - | name | my_server | - | project_id | 333acb15a43242f4a609a27cb097a8f2 | - | spec | +------------+----------------------------------------+ | - | | | property | value | | - | | +------------+----------------------------------------+ | - | | | version | 1.0 | | - | | | type | "os.nova.server" | | - | | | properties | { | | - | | | | "key_name": "oskey", | | - | | | | "flavor": 1, | | - | | | | "networks": [ | | - | | | | { | | - | | | | "network": "private" | | - | | | | } | | - | | | | ], | | - | | | | "image": "cirros-0.3.2-x86_64-uec", | | - | | | | "name": "cirros_server" | | - | | | | } | | - | | +------------+----------------------------------------+ | - | type | os.nova.server-1.0 | - | update_at | None | - | user_id | 5e5bf8027826429c96af157f68dc9072 | - +------------+---------------------------------------------------------+ - -Note that :program:`openstack cluster` command line accepts one of the -following values when retrieving a profile object: - -- name: the name of a profile; -- ID: the UUID of a profile; -- short ID: an "abbreviated version" of the profile UUID. - -Since Senlin doesn't require a profile name to be unique, specifying profile -name for the :command:`openstack cluster profile show` command won't guarantee -that a profile object is returned. You may get a ``MultipleChoices`` exception -if more than one profile object match the name. - -As another option, when retrieving a profile (or in fact any other objects, -e.g. a cluster, a node, a policy etc.), you can specify the leading sub-string -of an UUID as the "short ID" for query. 
For example:: - - $ openstack cluster profile show 70a36cc7 - +------------+---------------------------------------------------------+ - | Field | Value | - +------------+---------------------------------------------------------+ - | created_at | 2015-07-01T03:18:58 | - | domain_id | None | - | id | 70a36cc7-9fc7-460e-98f6-d44e3302e604 | - | location | None | - | metadata | {} | - | name | my_server | - | project_id | 333acb15a43242f4a609a27cb097a8f2 | - | spec | +------------+----------------------------------------+ | - | | | property | value | | - | | +------------+----------------------------------------+ | - | | | version | 1.0 | | - | | | type | "os.nova.server" | | - | | | properties | { | | - | | | | "key_name": "oskey", | | - | | | | "flavor": 1, | | - | | | | "networks": [ | | - | | | | { | | - | | | | "network": "private" | | - | | | | } | | - | | | | ], | | - | | | | "image": "cirros-0.3.2-x86_64-uec", | | - | | | | "name": "cirros_server" | | - | | | | } | | - | | +------------+----------------------------------------+ | - | type | os.nova.server-1.0 | - | update_at | None | - | user_id | 5e5bf8027826429c96af157f68dc9072 | - +------------+---------------------------------------------------------+ - $ openstack cluster profile show 70a3 - +------------+---------------------------------------------------------+ - | Field | Value | - +------------+---------------------------------------------------------+ - | created_at | 2015-07-01T03:18:58 | - | domain_id | None | - | id | 70a36cc7-9fc7-460e-98f6-d44e3302e604 | - | location | None | - | metadata | {} | - | name | my_server | - | project_id | 333acb15a43242f4a609a27cb097a8f2 | - | spec | +------------+----------------------------------------+ | - | | | property | value | | - | | +------------+----------------------------------------+ | - | | | version | 1.0 | | - | | | type | "os.nova.server" | | - | | | properties | { | | - | | | | "key_name": "oskey", | | - | | | | "flavor": 1, | | - | | | | "networks": [ | | - | | | | { | | - | | | | "network": "private" | | - | | | | } | | - | | | | ], | | - | | | | "image": "cirros-0.3.2-x86_64-uec", | | - | | | | "name": "cirros_server" | | - | | | | } | | - | | +------------+----------------------------------------+ | - | type | os.nova.server-1.0 | - | update_at | None | - | user_id | 5e5bf8027826429c96af157f68dc9072 | - +------------+---------------------------------------------------------+ - -As with query by name, a "short ID" won't guarantee that a profile object is -returned even if it does exist. When there are more than one object matching -the short ID, you will get a ``MultipleChoices`` exception. - - -Updating a Profile -~~~~~~~~~~~~~~~~~~ - -In general, a profile object should not be updated after creation. This is a -restriction to keep cluster and node status consistent at any time. However, -considering that there are cases where a user may want to change some -properties of a profile, :program:`openstack cluster` command line does -support the :command:`profile update` sub-command. For example, the following -command changes the name of a profile to ``new_server``:: - - $ openstack cluster profile update --name new_server myserver - -The following command creates or updates the metadata associated with the given -profile:: - - $ openstack cluster profile update --metadata version=2.2 myserver - -Changing the "spec" of a profile is not allowed. The only way to make a change -is to create a new profile using the :command:`profile create` sub-command. 
-
-
-Deleting a Profile
-~~~~~~~~~~~~~~~~~~
-
-When there are no clusters or nodes referencing a profile object, you can
-delete it from the Senlin database using the following command::
-
-  $ openstack cluster profile delete myserver
-
-Note that in this command you can use the name, the ID or the "short ID" to
-specify the profile object you want to delete. If the specified criteria
-cannot match any profiles, you will get a ``ResourceNotFound`` exception. If
-more than one profile matches the criteria, you will get a ``MultipleChoices``
-exception. For example::
-
-  $ openstack cluster profile delete my
-  ERROR(404): The profile (my) could not be found.
-  Failed to delete any of the specified profile(s).
-
-
-See Also
-~~~~~~~~
-
-The following is a list of links to documents related to the creation and
-usage of profiles:
-
-- :doc:`Working with Profile Types <profile_types>`
-- :ref:`Nova Profile <ref-nova-profile>`
-- :ref:`Stack Profile <ref-stack-profile>`
-- :ref:`Docker Profile <ref-docker-profile>`
-- :doc:`Creating and Managing Clusters <clusters>`
-- :doc:`Creating and Managing Nodes <nodes>`
-- :doc:`Managing Cluster Membership <membership>`
-- :doc:`Examining Actions <actions>`
-- :doc:`Browsing Events <events>`
diff --git a/doc/source/user/receivers.rst b/doc/source/user/receivers.rst
deleted file mode 100644
index 49e343bc3..000000000
--- a/doc/source/user/receivers.rst
+++ /dev/null
@@ -1,185 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-.. _ref-receivers:
-
-========
-Receiver
-========
-
-A :term:`Receiver` is used to prepare the Senlin engine to react to external
-alarms or events so that a specific :term:`Action` can be initiated on a
-senlin cluster automatically. For example, when the workload on a cluster
-climbs high, a receiver can change the size of the specified cluster.
-
-
-Listing Receivers
-~~~~~~~~~~~~~~~~~
-
-The :program:`openstack cluster` command line provides a sub-command
-:command:`receiver list` that can be used to enumerate receiver objects known
-to the service. For example::
-
-  $ openstack cluster receiver list
-
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing receivers,
-using the option :option:`--sort`. The :option:`--sort` option accepts a
-string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys used
-are receiver properties and the dirs can be one of ``asc`` and ``desc``. When
-omitted, Senlin sorts a given key using ``asc`` as the default direction.
-
-For example, the following command sorts the receivers using the ``name``
-property in descending order::
-
-  $ openstack cluster receiver list --sort name:desc
-
-When sorting the list of receivers, you can use one of ``type``, ``name``,
-``action``, ``cluster_id``, ``created_at``.
-
-
-Paginating the List
--------------------
-
-In case you have a huge collection of receiver objects, you can limit the
-number of receivers returned from the Senlin server, using the option
-:option:`--limit`. For example::
-
-  $ openstack cluster receiver list --limit 1
-
-Yet another option you can specify is the ID of a receiver object after which
-you want the returned list to start. In other words, you don't want to see
-those receivers whose IDs are or come before the one you specify. You can use
-the option :option:`--marker` for this purpose. For example::
-
-  $ openstack cluster receiver list \
-      --limit 1 --marker 239d7212-6196-4a89-9446-44d28717d7de
-
-Combining the :option:`--marker` option and the :option:`--limit` option
-enables you to do pagination on the results returned from the server.
-
-
-Creating and Using a Receiver
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Currently, Senlin supports two receiver types: "``webhook``" and "``message``".
-For the former, a permanent webhook URL is generated for users to trigger
-a specific action on a given cluster by sending an HTTP POST request. For the
-latter, a Zaqar message queue is created for users to post a message.
-Such a message is used to notify the Senlin service to initiate an action on a
-specific cluster.
-
-Webhook Receiver
-----------------
-
-When creating a webhook receiver, you are expected to use the option
-:option:`--cluster` to specify the target cluster and the option
-:option:`--action` to specify the action name. By default, the
-:program:`openstack cluster receiver create` command line creates a receiver
-of type "``webhook``". Users can also explicitly specify the receiver type
-using the option :option:`--type`, for example:
-
-.. code-block:: console
-
-   $ openstack cluster receiver create \
-     --cluster test-cluster \
-     --action CLUSTER_SCALE_OUT \
-     --type webhook \
-     test-receiver
-   +------------+-----------------------------------------------------------+
-   | Field      | Value                                                     |
-   +------------+-----------------------------------------------------------+
-   | action     | CLUSTER_SCALE_OUT                                         |
-   | actor      | {                                                         |
-   |            |   "trust_id": "2e76547947954e6ea62b61a658ffb8e5"          |
-   |            | }                                                         |
-   | channel    | {                                                         |
-   |            |   "alarm_url": "http://10.20.10.17:8777/v1/webhooks/...." |
-   |            | }                                                         |
-   | cluster_id | 9f1883a7-6837-4fe4-b621-6ec6ba6c3668                      |
-   | created_at | 2018-02-24T09:23:48Z                                      |
-   | domain_id  | None                                                      |
-   | id         | 2a5a266d-0c3a-456c-bbb7-f8b26ef3b7f3                      |
-   | location   | None                                                      |
-   | name       | test-receiver                                             |
-   | params     | {}                                                        |
-   | project_id | bdeecc1b58004bb19302da77ac056b44                          |
-   | type       | webhook                                                   |
-   | updated_at | None                                                      |
-   | user_id    | e1ddb7e7538845968789fd3a863de928                          |
-   +------------+-----------------------------------------------------------+
-
-The Senlin service will return the receiver information with its channel ready
-to receive HTTP POST requests. For a webhook receiver, this means you can
-check the "``alarm_url``" field of the "``channel``" property. You can use
-this URL to trigger the action you specified.
-
-The following command triggers the receiver by sending a ``POST`` request to
-the URL obtained from its ``channel`` property, for example:
-
-.. code-block:: console
-
-   $ curl -X POST <alarm_url>
-
-
-Message Receiver
-----------------
-
-A message receiver is different from a webhook receiver in that it can trigger
-different actions on different clusters. Therefore, the options
-:option:`--cluster` and :option:`--action` can be omitted when creating a
-message receiver. Senlin will check if the incoming message contains such
-properties.
-
-You will need to specify the receiver type "``message``" using the option
-:option:`--type` when creating a message receiver, for example:
-
-.. code-block:: console
-
-   $ openstack cluster receiver create \
-     --type message \
-     test-receiver
-
-The Senlin service will return the receiver information with its channel ready
-to receive messages. For a message receiver, this means you can check the
-"``queue_name``" field of the "``channel``" property.
-
-Once a message receiver is created, you (or some software) can send messages
-with the following format to the named Zaqar queue to request the Senlin
-service:
-
-.. code-block:: python
-
-   {
-       "messages": [
-           {
-               "ttl": 300,
-               "body": {
-                   "cluster": "test-cluster",
-                   "action": "CLUSTER_SCALE_OUT",
-                   "params": {"count": 2}
-               }
-           }
-       ]
-   }
-
-More examples of sending messages to a Zaqar queue can be found here:
-
-https://opendev.org/openstack/python-zaqarclient/src/branch/master/examples
-
-.. note::
-
-   Users are permitted to trigger multiple actions at the same time by sending
-   more than one message to a Zaqar queue in the same request. In that case,
-   the order of actions generated depends on how Zaqar sorts those messages.
diff --git a/doc/specs/README.rst b/doc/specs/README.rst
deleted file mode 100644
index de254589b..000000000
--- a/doc/specs/README.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-INTRO
-=====
-
-This directory holds the proposals of non-trivial changes to senlin. We host
-them here to avoid the potential headaches of managing yet another project,
-say `senlin-specs`. When the need arises for a dedicated project for
-proposals, we can create such a project and migrate the contents of this
-directory there.
-
-
-DIRECTORY LAYOUT
-================
-
-Proposals are put into this directory during review. After being reviewed,
-each proposal is moved into either the `rejected` subdirectory or the
-`approved` subdirectory.
-
-
-rejected
---------
-
-A subdirectory for proposals that were rejected.
-
-
-approved
---------
-
-A subdirectory for proposals that were approved.
diff --git a/doc/specs/approved/README.rst b/doc/specs/approved/README.rst
deleted file mode 100644
index 6d0df9dbd..000000000
--- a/doc/specs/approved/README.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-This directory holds the feature proposals that have been approved. Once a
-feature has landed, its content should be migrated into a design document
-instead of being kept here.
diff --git a/doc/specs/approved/container-cluster.rst b/doc/specs/approved/container-cluster.rst
deleted file mode 100644
index d787ca390..000000000
--- a/doc/specs/approved/container-cluster.rst
+++ /dev/null
@@ -1,176 +0,0 @@
-..
-   Licensed under the Apache License, Version 2.0 (the "License"); you may
-   not use this file except in compliance with the License. You may obtain
-   a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-   License for the specific language governing permissions and limitations
-   under the License.
-
-=================
-Container Cluster
-=================
-
-The mission of the Senlin project is to provide a generic clustering service
-for an OpenStack cloud. Senlin currently provides clustering for Nova
-instances and Heat stacks, so it is natural to think about clustering for
-containers as well.
-
-
-Problem Description
-===================
-
-Regarding container services, Magnum is a project which provides an API for
-users to deploy container orchestration engines such as Docker Swarm,
-Kubernetes and Apache Mesos. Using these engines, users can build and manage
-their own container clouds. However, the container clouds created by these
-tools are not managed by Magnum after they are created. That means those
-containers are not OpenStack-managed resources, so other projects which want
-to use container resources cannot invoke Magnum to acquire them. Furthermore,
-the dependency on those engines will cause version management problems, and
-it is difficult to test the container engines because they are not
-implemented in Python. Cloud operators who want to use OpenStack to manage
-containers may want OpenStack's own container service instead of having to
-learn how to use Docker Swarm etc.
-
-Use Cases
-=========
-
-Users who want to use container services may want to use a container cluster
-instead of a single container. In an OpenStack cloud, users may want to
-deploy container clusters on baremetal machines, or on all or some specific
-virtual machines in the cloud. Such a container cluster is desired to be a
-scalable, highly available, multi-tenant and secure cloud that can be easily
-controlled by invoking the standard OpenStack REST API.
-
-Proposed Changes
-================
-
-1. Docker library
-   Senlin would like to support Docker type container resources. As Docker
-   provides an API to developers, it is very easy to create/delete a container
-   resource by invoking the Docker API directly.
-   A Docker driver will be added for container management.
-2. Container Profile
-   It is necessary to add a new type of profile for containers to start with.
-   The container profile will contain the required properties, like network
-   and volume, needed to create a container.
-3. Scheduling
-   To decide on which virtual/baremetal machines containers are started, a
-   scheduler is needed. There are some existing container schedulers like
-   Docker Swarm which are widely used in production, but considering Senlin's
-   features, it is reasonable to invent a scheduler which can better support
-   container auto-scaling. For example, starting containers preferentially on
-   specified nodes whose CPU utilization is lower than a certain value.
-   This is an intelligent but complicated solution for container scheduling;
-   to meet the limited needs, the Senlin placement policy can be used as a
-   scheduler in place of a complicated scheduler implementation.
-   For the simplest case, add 'host_node' and 'host_cluster' properties to
-   the container profile, which can be used to determine the placement of
-   containers. Since Senlin supports scaling, some rules should be obeyed
-   regarding host_node and host_cluster usage.
-
-   * Only container type profiles can contain 'host_node' and 'host_cluster'
-     properties.
-   * A container type profile must contain both 'host_node' and 'host_cluster'
-     properties, but either of them (not both) can be None.
-   * host_node must belong to host_cluster.
-   * If host_node is None and host_cluster is not None, the container will be
-     started on some node of the cluster chosen at random. (This may be
-     changed in the future, to support the case of low CPU or memory usage
-     priority.)
-4. Network
-   To allocate an IP address to every container, a network for containers is
-   needed before creating a container.
Kuryr brings container networking to
-   Neutron, which makes container networking management similar to that of
-   Nova servers. Senlin will introduce Kuryr for container networking
-   management.
-5. Storage
-   For the virtual machines in which containers will be started, it is
-   necessary to attach a volume in advance. The containers started in the
-   virtual machines will share the volume. Currently Flocker and Rexray are
-   the options.
-6. Policies
-   The policies for a container service are different from those for virtual
-   machines. For example, in a placement policy the specific nodes,
-   availability zones or regions should be provided.
-7. Test
-   Add test cases for the container service on both the client and server
-   sides.
-
-Alternatives
-------------
-
-Any other ideas for managing containers with Senlin.
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-Not clear.
-
-Other end user impact
----------------------
-
-Users can use Senlin commands to create/update/delete a container cluster.
-Managing containers will become much easier.
-
-Performance Impact
-------------------
-
-None
-
-Developer Impact
-----------------
-
-None
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-xuhaiwei
-anyone interested
-
-Work Items
-----------
-
-Depends on the design plan
-
-Dependencies
-============
-
-Depends on Docker.
-
-Testing
-=======
-
-Undecided
-
-Documentation Impact
-====================
-
-Documentation about container clusters will be added.
-
-References
-==========
-
-None
-
-History
-=======
-
-Approved: Newton
-Implemented: Newton
diff --git a/doc/specs/approved/generic-event.rst b/doc/specs/approved/generic-event.rst
deleted file mode 100644
index 87f8b64f2..000000000
--- a/doc/specs/approved/generic-event.rst
+++ /dev/null
@@ -1,256 +0,0 @@
-..
-   This work is licensed under a Creative Commons Attribution 3.0 Unported
-   License.
-
-   http://creativecommons.org/licenses/by/3.0/legalcode
-
-====================================
-Generic Event Interface and Backends
-====================================
-
-The URL of the launchpad blueprint:
-
-https://blueprints.launchpad.net/senlin/+spec/generic-event
-
-Currently senlin has a DB backend to log events that might be of interest to
-users/operators. However, we will also need to send out event notifications
-for integration with 3rd party software/services. Users/operators may want to
-dump the events into a file or a time series database for processing.
-
-The blueprint proposes a generic interface for dumping events/notifications.
-Such an interface can be implemented by different backends as event plugins.
-
-Problem description
-===================
-
-While the Senlin engine is operating clusters or nodes, interacting with other
-services or enforcing policies, there are many cases where the operations
-(and the results) should be dumped.
-
-Currently, Senlin only has a builtin event table in its database. It
-accumulates very fast, it is not flexible, and its content is not versioned.
-
-To integrate with other services, Senlin will need to generate and send
-notifications when certain events happen. More complex (pre-)processing can
-be offloaded to a service dedicated to this task (e.g. Panko from Ceilometer),
-but the basic notifications should always come from the engine.
-(Note that we treat "notifications" as a special form of events, i.e. they
-are "events on the wire", events sent to a message queue for other
-services/software to consume.)
-
-
-As Senlin evolves, changes are inevitable regarding the content of the
-payload of such events and/or notifications. To best protect users'
-investment in downstream event processing, we will need to be very explicit
-about the content and format of each and every event/notification.
-
-The format of events/notifications should be well documented so that users or
-developers of downstream software don't need to dig into Senlin's source code
-to find out the exact format of each event type or notification type. This
-should remain true even when the event/notification format evolves over time.
-
-There is no one-size-fits-all solution that meets all requirements from the
-use cases enumerated in the "Use Cases" subsection below. The event generation
-has to be an open framework, with a generic interface, that allows for
-diversified backend implementations, aka drivers.
-
-Events or notifications are inherently of different criticality or severity.
-Users should be able to filter the events by their severity easily. Similarly,
-events or notifications are generated from different types of modules, e.g.
-``engine``, ``profile``, ``policy``, so we may want to enable an operator to
-specify the sources of events to include or exclude. Note that source-based
-filtering is not a high priority requirement as we see it today.
-
-Use Cases
----------
-
-The dumping of events could serve several use cases:
-
-- Problem diagnosis: Although there are cases where users can check the logs
-  from the engine (let's suppose we are already dumping rich information),
-  it is unlikely that everyone is granted access to the raw log files. Event
-  logs are a replacement for raw log files.
-- Integration with Other Software: When building a solution by integrating
-  Senlin with other software/services, the said service may need Senlin to
-  emit events of interest so that some operations can be adjusted
-  dynamically.
-- Auditing: In the case where there are auditing requirements regarding
-  user behavior analysis or resource usage tracking, a history of user
-  operations would be very helpful for conducting this kind of analysis.
-
-Proposed change
-===============
-
-Add an interface definition for event logging and notification. This will be
-a unified interface for all backends. The interface is a generalization of
-the existing event dumping support.
-
-Make the existing event module (which dumps events into DB tables) a
-plugin that implements the logging interface.
-
-Model all events dumped today as versioned objects. Different event types will
-use different objects. This will be done by preserving the existing DB schema
-of the ``event`` table if possible. And, more importantly, the event
-abstraction should match the expectations of the notification interface from
-the ``oslo.messaging`` package. We will learn from the versioned notification
-design of Nova, but we are going one step further.
-
-Add filters for event/notification generation, regarding the severity and the
-source. Expose these filters as configurable options in ``senlin.conf``.
-These filters (among others) may deserve a new section, but we will decide
-when we get there.
-
-Add stevedore plugin loading support to logging, with "``database``" and
-"``messaging``" set as defaults. We may add a ``json file`` backend
-for demonstration purposes, but that is optional. The backend of event
-logging (and notification) will be exposed as a multi-string configuration
-option in ``senlin.conf``.
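-
-To make the versioned payload idea concrete, a notification emitted to the
-message queue might be shaped roughly as follows. This is a sketch only: the
-object and field names merely follow common oslo.versionedobjects conventions
-and are illustrative, not a committed format:
-
-.. code-block:: yaml
-
-   event_type: cluster.create.end
-   publisher_id: senlin-engine:node1
-   priority: INFO
-   payload:
-     senlin_object.namespace: senlin
-     senlin_object.name: ClusterActionPayload
-     senlin_object.version: '1.0'
-     senlin_object.data:
-       cluster:
-         id: ...
-         name: test-cluster
-       action: CLUSTER_CREATE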
- -Following the "api-ref" scheme for API documentation, we will document the -formats of all events/notifications in reST files. - -Alternatives ------------ - -Keep the event generation and notification separate. This seems to duplicate -a lot of logic. At a source location where you want to fire an event, and also -a log record and also a notification, you may have to make three calls. - -Data model impact ----------------- - -We will strive to keep the existing DB schema (especially the ``event`` table -format) unless we have a good reason to add columns. - -REST API impact --------------- - -There is no change to the REST API planned. - -Security impact --------------- - -One thing we are not so sure about is where to draw the line between "proper" and -"excessive" dumping of events. We will need some profiling to make this -trade-off. - -Both events and notifications will leverage the multi-tenancy support (i.e. -``project`` will be included in the payload), so tenant isolation won't be a -problem. - -Notifications impact -------------------- - -Well... this spec is about constructing the infrastructure for notification, -in addition to events and logs. - -Other end user impact -------------------- - -Users will be able to see notifications from Senlin in the message queue. -Users will get detailed documentation about the event/notification format. - -No change to python-senlinclient will be involved. - -There could be changes to senlin-dashboard if we change the response from the -``event-list`` or ``event-show`` API, but that is not expected. - -Performance Impact ------------------ - -* An overloaded message queue may lead to slower responses from senlin-engine? - Not quite sure. - -* An overloaded DBMS may slow down the senlin-engine. - -* A high frequency of event generation will, as common sense suggests, impact - the service performance. - -Other deployer impact ------------------- - -There is no new dependency on other packages planned. - -There will be several new config options added. We will make them as generic -as possible because the infrastructure proposed is a generic one. We will -include database and message as the default backends, which should work in -most real deployments (a sketch follows after the Testing section below). - -The changes to the configuration file will be documented in release notes. - -Developer impact ---------------- - -There will be some reference documents on the event/notification format design -for developers of downstream software/services. - -There will be some developer documents for adding new logging backends. - - -Implementation ============== - -Assignee(s) ----------- - -Primary assignee: - Qiming - -Other contributors: - Anyone who wishes to adventure ... - -Work Items ---------- - -Currently identified work items: - -- Abstract class (interface) for logging; -- Rebase the event dumping module onto this interface; -- Versioned objects for existing events; -- Driver for dumping events (thus becoming notifications) to the message queue; -- Dynamic loading of both backends (database and message); -- Configuration options for backend selection and customization; -- Documentation of event formats; -- User documentation for events (improvement); -- Developer documentation for new logging backends; - -Dependencies ============ - -No dependency on other specs/bps/projects. - -Need to watch changes in ``oslo.messaging`` and ``oslo.versionedobjects`` to -tune the implementation. - -Testing ======= - -Only unit tests are planned. - -There is not yet a plan for API tests, functional tests, stress tests or -integration tests.
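As a concrete illustration of the deployer-facing options discussed above: a
multi-string option in oslo.config is set by repeating the key, so the backend
selection in ``senlin.conf`` could take a shape like the following (the section
and the option name are assumptions of this sketch, not the final design)::

    # Hypothetical senlin.conf fragment; 'event_dispatchers' is an
    # illustrative name for the proposed multi-string backend option.
    [DEFAULT]
    event_dispatchers = database
    event_dispatchers = messaging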
- -Documentation Impact ==================== - -New documentation: - -- Documentation of event formats; -- User documentation for events (improvement); -- Developer documentation for new logging backends; -- Release notes - -References ========== - -N/A - -History ======= - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Ocata - - Introduced diff --git a/doc/specs/cluster-fast-scaling.rst b/doc/specs/cluster-fast-scaling.rst deleted file mode 100644 index ca35812a7..000000000 --- a/doc/specs/cluster-fast-scaling.rst +++ /dev/null @@ -1,159 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -==================== -Cluster Fast Scaling -==================== - -The URL of launchpad blueprint: - -https://blueprints.launchpad.net/senlin/+spec/add-attribute-fast-scaling-to-cluster - -The major function of Senlin is managing clusters, changing the capacity of a -cluster using scale-out and scale-in operations. Generally, a single scaling -operation will take tens of seconds, or even a few minutes in extreme cases. -That is a long time in an actual production environment, so we need to improve -Senlin to support fast scaling. - -Rather than improving hardware performance or optimizing code, a better way -is to create some standby nodes when creating a new cluster. When the cluster -needs to change its capacity immediately, or to replace nodes in 'error' state -with 'active' nodes, it can add nodes from the standby pool to the cluster, or -remove the error nodes from the cluster and add active standby nodes in their -place. - -To make cluster scaling fast, this spec proposes to extend Senlin to create -standby nodes and to improve the scaling operations. - - -Problem description =================== - -Before actually scaling a cluster, Senlin needs to do many things; the slowest -step is creating or deleting a node. - -Use Cases --------- - -If Senlin supports fast scaling, the following cases become possible: - -- Changing the capacity of a cluster immediately, without waiting for nodes -to be created or deleted. - -- Replacing error nodes in a cluster immediately, improving the availability -of the cluster. - -- Improving the situation where a cluster is scaled many times within a short -period. - -Proposed change =============== - -1. Add a new attribute 'fast_scaling' to the cluster metadata. With the -attribute set, Senlin will create standby nodes when creating a new cluster. -The number of standby nodes can be specified, but the sum of standby nodes and -in-cluster nodes should be less than the max size of the cluster (see the -metadata sketch after the Alternatives section below). - -2. Revise the cluster create and cluster delete operations to support the new -attribute, deleting standby nodes when a cluster is deleted. - -3. Revise the scale-out and scale-in operations: with the new attribute set, -first add nodes from the standby pool to the cluster, or move nodes from the -cluster to the standby pool. - -4. Revise the health policy to check the state of standby nodes and to support -replacing error nodes with active standby nodes. - -5. Revise the deletion policy to either delete nodes or move them to the -standby pool when performing a deletion operation. - -Alternatives ------------ - -Any other ideas for fast scaling of a cluster.
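To illustrate item 1 of the proposed change, a cluster enabling this behavior
might carry metadata like the following. Both key names are hypothetical and
not part of any implemented interface::

    # A minimal sketch, assuming fast scaling is toggled via cluster
    # metadata; 'fast_scaling' and 'standby_nodes' are hypothetical keys.
    metadata:
      fast_scaling: true
      # sum of standby and in-cluster nodes must stay below max_size
      standby_nodes: 2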
- -Data model impact ----------------- - -None - -REST API impact --------------- - -None - -Security impact --------------- - -None - -Notifications impact ------------------- - -None - -Other end user impact ------------------- - -None - -Performance Impact ---------------- - -The standby nodes will claim some resources. We should keep the number -of standby nodes within a reasonable range. - -Other deployer impact ------------------- - -None - -Developer impact -------------- - -None - - -Implementation ============== - -Assignee(s) ----------- - -chohoor (Hongbin Li) - -Work Items ---------- - -Depends on the design plan. - - -Dependencies ============ - -None - - -Testing ======= - -Need unit tests. - - -Documentation Impact ==================== - -Documentation about the API and operations should be updated. - - -References ========== - -None - - -History ======= - -None diff --git a/doc/specs/fail-fast-on-locked_resource.rst b/doc/specs/fail-fast-on-locked_resource.rst deleted file mode 100644 index 7d50762b5..000000000 --- a/doc/specs/fail-fast-on-locked_resource.rst +++ /dev/null @@ -1,257 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================= -Fail fast on locked resources -============================= - - -When an operation on a locked resource (e.g. cluster or node) is requested, -Senlin creates a corresponding action and calls on the engine dispatcher to -process it asynchronously. If the targeted resource is locked by another -operation, the action will fail and the engine will ask the -dispatcher to retry the action up to three times. If the resource is still -locked after three retries, the action is considered failed. The user making -the operation request will not know that an action has failed until the -retries have been exhausted and they query the action state from Senlin. - -This spec proposes to check the lock status of the targeted resource during -the synchronous API call made by the user, and to fail immediately if the -resource is locked. The -failed action is not automatically retried. Instead, it is up to the user to -retry the API call as desired. - - -Problem description =================== - -The current implementation, where failed actions are automatically retried, can -lead to starvation situations when a large number of actions on the same target -cluster or node are requested. E.g. if a user requests 100 scale-in operations -on a cluster, the Senlin engine will take a long time to process the retries and -will not be able to respond to other commands in the meantime. - -Another problem with the current implementation is encountered when health -checks are running against a cluster and the user is simultaneously performing -operations on it. When the health check thread determines that a node is -unhealthy (1), the user could request a cluster scale-out (2) before the health -check thread has had a chance to call node recovery (4). In that case the first node -recovery will fail because the cluster is already locked, and the node recovery -action will be retried in the background. However, after the scale-out -completes and the next iteration of the health check runs, it might still see -the node as unhealthy and request another node recovery. In that case the node -will be unnecessarily recovered twice.
- -:: - - +---------------+ +---------------+ +-------+ - | HealthManager | | SenlinEngine | | User | - +---------------+ +---------------+ +-------+ - | -----------------\ | | - |-| Health check | | | - | | thread starts. | | | - | |----------------| | | - | | | - | (1) Is Node healthy? No. | | - |------------------------- | | - | | | | - |<------------------------ | | - | | | - | | (2) Scale Out Cluster. | - | |<---------------------------| - | | | - | | (3) Lock cluster. | - | |------------------ | - | | | | - | |<----------------- | - | | | - | (4) Recover node. | | - |-------------------------------------------------->| | - | | | - | (5) Recover node action created. | | - |<--------------------------------------------------| | - | | | - | | (6) Cluster is locked. | - | | Retry node recover. | - | |----------------------- | - | | | | - | |<---------------------- | - | | | - | (7) Get node recover action status. | | - |-------------------------------------------------->| | - | | | - | (8) Node recover action status is failed. | | - |<--------------------------------------------------| | - | ---------------\ | | - |-| Health check | | | - | | thread ends. | | | - | |--------------| | | - | | | - -Finally, there are other operations that can lead to locked clusters that are -never released as indicated in this bug: -https://bugs.launchpad.net/senlin/+bug/1725883 - -Use Cases ---------- - -As a user, I want to know right away if an operation on a cluster or node fails -because the cluster or node is locked by another operation. By being able to -receive immediate feedback when an operation fails due to a locked resource, the -Senlin engine will adhere to the fail-fast software design principle [1] and -thereby reducing the software complexity and potential bugs due to -locked resources. - -Proposed change -=============== - - -1. **All actions** - - Before an action is created, check if the targeted cluster or node is - already locked in the cluster_lock or node_lock tables. - - * If the target cluster or node is locked, throw a ResourceIsLocked - exception. - * If the action table already has an active action operating on the - target cluster or node, throw a ActionConflict exception. An action - is defined as active if its status is one of the following: - READY, WAITING, RUNNING OR WAITING_LIFECYCLE_COMPLETION. - * If the target cluster or node is not locked, proceed to create the - action. - -2. **ResourceIsLocked** - - New exception type that corresponds to a 409 HTTP error code. - -3. **ActionConflict** - - New exception type that corresponds to a 409 HTTP error code. - - -Alternatives ------------- - -None - - -Data model impact ------------------ - -None - -REST API impact ---------------- - -* Alls Action (changed in **bold**) - - :: - - POST /v1/clusters/{cluster_id}/actions - - - - Normal HTTP response code(s): - - =============== =========================================================== - Code Reason - =============== =========================================================== - 202 - Accepted Request was accepted for processing, but the processing has - not been completed. A 'location' header is included in the - response which contains a link to check the progress of the - request. 
- =============== =========================================================== - - - Expected error HTTP response code(s): - - ========================== =============================================== - Code Reason - ========================== =============================================== - 400 - Bad Request Some content in the request was invalid. - 401 - Unauthorized User must authenticate before making a request. - 403 - Forbidden Policy does not allow current user to do this - operation. - 404 - Not Found The requested resource could not be found. - **409 - Conflict** **The requested resource is locked by** - **another action** - 503 - Service Unavailable Service unavailable. This is mostly - caused by service configuration errors which - prevents the service from successful start up. - ========================== =============================================== - - - -Security impact ---------------- - -None - -Notifications impact --------------------- - - -Other end user impact ---------------------- - -The python-senlinclient requires modification to return the 409 HTTP error code -to the user. - -Performance Impact ------------------- - -None - -Other deployer impact ---------------------- - -None - -Developer impact ----------------- - -None - - -Implementation -============== - -Assignee(s) ------------ - -dtruong@blizzard.com - -Work Items ----------- - -None - -Dependencies -============ - -None - - -Testing -======= - -Unit tests and tempest tests are needed for the new action request behavior when -a resource is locked. - -Documentation Impact -==================== - -End User Guide needs to updated to describe the new behavior of action -requests when a target resource is locked. The End User Guide should also -describe that the user can retry an action if they receive 409 HTTP error code. - -References -========== - -[1] https://www.martinfowler.com/ieeeSoftware/failFast.pdf - - -History -======= - -None diff --git a/doc/specs/lifecycle-hook.rst b/doc/specs/lifecycle-hook.rst deleted file mode 100644 index a0ffdca65..000000000 --- a/doc/specs/lifecycle-hook.rst +++ /dev/null @@ -1,390 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -======================================= -Add lifecycle hooks for scale in action -======================================= - - -The AWS autoscaling service provides a 'lifecycle hook' feature that Senlin -currently lacks. Lifecycle hooks during scaling operations allow the user or -an application to perform custom setup or clean-up of instances. - -This spec proposes to add lifecycle hook specific properties to the deletion -policy applied during node removal operations (i.e. scale-in, cluster-resize, -cluster-node-del and node-delete actions). The lifecycle hook properties specify -a timeout and a Zaqar queue as the notification target. If the node removal -operation detects that a deletion policy with lifecycle hook properties is -attached, it will send a lifecycle hook message to the notification target -for each node identified for deletion. The lifecycle hook message contains the -node ID of the instance to be deleted and a lifecycle action token. In -addition, the node removal operation will defer the actual deletion of those -nodes until the timeout in the deletion policy has been reached. - -This spec also adds a new 'complete lifecycle' API endpoint. 
When this API -endpoint is called with the lifecycle action token from the lifecycle hook -message, Senlin immediately deletes the node that was identified by the -node removal operation for deletion. Calling the 'complete lifecycle' API -endpoint also cancels the deferred node deletion initiated by the node removal -operation. - -Problem description -=================== - -When performing a scale-in operation with Senlin, an instance might require -custom cleanup. A lifecycle hook sends a notification that lets the receiving -application perform those custom clean-up steps on an instance before the node -is deleted. - -After the clean-up has finished, the application can wait for an expired -lifecycle hook timeout that automatically triggers the deletion of the nodes. -Alternatively, the application can send a 'complete lifecycle' message to -Senlin to proceed with the node deletion without waiting for the lifecycle -hook timeout to expire. - -Use Cases ---------- - -The typical use case occurs when a node must move its in-progress workload off -to another node before it can be terminated. During auto scale-in events, an -application must receive a Zaqar message to start those custom cleanups on -the termination-pending nodes. If the application does not complete the -lifecycle by a specified timeout, Senlin automatically deletes the node. If -the application finishes the cleanup before the specified timeout expires, -the application notifies Senlin to complete the lifecycle for a specified -node. This triggers the immediate deletion of the node. - -Proposed change -=============== - -1. **Deletion policy** - - New lifecycle hook specific properties: - - * timeout - * target type - * target name - -2. **New action status** - - WAITING_LIFECYCLE_COMPLETION - -3. **Scale-in, cluster-resize, cluster-node-del, node-delete actions** - - If deletion policy with lifecycle hook properties is attached, the above - actions differ from current implementation as follows: - - * For each node identified to be deleted: - - * DEL_NODE action is created with status as WAITING_LIFECYCLE_COMPLETION. - * Send a message to the target name from deletion policy. - The message contains: - - * lifecycle_action_token: same as DEL_NODE action ID - * node_id - - * Create dependencies between the DEL_NODE actions and the original action - - * Wait for dependent actions to complete or lifecycle timeout specified in - deletion policy to expire - - * If lifecycle timeout is reached: - - * For each DEL_NODE action: - - * If DEL_NODE action status is WAITING_LIFECYCLE_COMPLETION, then change - action status to READY - - * Call dispatcher.start_action - -4. **'Complete lifecycle' API endpoint** - - The new API endpoint to signal completion of lifecycle. It expects - lifecycle_action_token as a parameter. - - * Use lifecycle_action_token to load DEL_NODE action - * If DEL_NODE action status is WAITING_LIFECYCLE_COMPLETION, then change - action state to READY and call dispatcher.start_action - -Alternatives ------------- - -Alternatively, attach a deletion policy with a grace period. The grace -period allows an application to perform clean-up of instances. However, -Senlin must implement event notifications in form of a HTTP sink or a Zaqar -queue so that the third party application knows which nodes are selected for -deletion. - -This solution lacks the 'complete lifecycle' action allowing an application to -request the node deletion before the timeout expires. 
This is undesirable -because the scale-in action locks the cluster while it is sleeping for the -grace period value. This will not work if the application finishes the -clean-up of the instances before the grace period expires and it wants to -perform another cluster action such as scale-out. - - -Data model impact ------------------ - -None - -REST API impact ---------------- - -* Complete Lifecycle Action - - :: - - POST /v1/clusters/{cluster_id}/actions - - Complete lifecycle action and trigger deletion of nodes. - - - Normal HTTP response code(s): - - =============== =========================================================== - Code Reason - =============== =========================================================== - 202 - Accepted Request was accepted for processing, but the processing has - not been completed. A 'location' header is included in the - response which contains a link to check the progress of the - request. - =============== =========================================================== - - - Expected error HTTP response code(s): - - ========================== =============================================== - Code Reason - ========================== =============================================== - 400 - Bad Request Some content in the request was invalid. - 401 - Unauthorized User must authenticate before making a request. - 403 - Forbidden Policy does not allow current user to do this - operation. - 404 - Not Found The requested resource could not be found. - 503 - Service Unavailable Service unavailable. This is mostly - caused by service configuration errors which - prevents the service from successful start up. - ========================== =============================================== - - - Request Parameters: - - ================================= ======= ======= ======================= - Name In Type Description - ================================= ======= ======= ======================= - OpenStack-API-Version (Optional) header string API microversion - request. - Takes the form of - OpenStack-API-Version: - clustering 1.0, where - 1.0 is the requested - API version. - cluster_id path string The name, UUID or - short-UUID of a cluster - object. - action body object A structured definition - of an action to be - executed. The object is - usually expressed as: - <action_name>: { - <param_1>: <value_1>, - <param_2>: <value_2>, - ... - } - - The <action_name> - indicates the requested - action while the <param> - keys provide - the associated - parameters to the - action. Each - individual action - has its own set of - parameters. - - The action_name in the - request body has to be - complete_lifecycle. - lifecycle_action_token body UUID The UUID of the - lifecycle action to be - completed. - ================================= ======= ======= ======================= - - - Request example:: - - { - "complete_lifecycle": { - "lifecycle_action_token": "ffbb9175-d510-4bc1-b676-c6aba2a4ca81" - } - } - - - Response parameters: - - ================================= ======= ======= ======================= - Name In Type Description - ================================= ======= ======= ======================= - X-OpenStack-Request-ID (Optional) header string A unique ID for - tracking service - request.
The request - ID associated with - the request by default - appears in the service - logs - Location header string For asynchronous object - operations, the - location header - contains a string - that can be interpreted - as a relative URI - from where users can - track the progress - of the action triggered - action body string A string - representation of - the action for - execution. - ================================= ======= ======= ======================= - -* Deletion Policy - - Additional properties specific to the lifecycle hook are added to the Deletion - policy. The existing properties from senlin.policy.deletion-1.0 are carried - over into senlin.policy.deletion-1.1 and not listed below. - - :: - - name: senlin.policy.deletion-1.1 - schema: - hooks: - description: Lifecycle hook properties - required: false - type: Map - updatable: false - schema: - type: - constraints: - - constraint: - - zaqar - - webhook - type: AllowedValues - default: zaqar - description: The type of lifecycle hook - required: false - type: String - updatable: false - params: - description: Specific parameters for the hook type - required: false - type: Map - updatable: false - schema: - queue: - description: Zaqar queue to receive lifecycle hook message - required: false - type: String - updatable: false - url: - description: Url sink to which to send lifecycle hook message - required: false - type: String - updatable: false - timeout: - description: Number of seconds before actual deletion happens - required: false - type: Integer - updatable: false - - -* Lifecycle Hook Message - - The lifecycle hook message is sent to the Zaqar queue when a scale_in - request is received and the cluster has the deletion policy with lifecycle - hook properties attached. It includes: - - ========================== ======= ======================================= - Name Type Description - ========================== ======= ======================================= - lifecycle_action_token UUID The action ID of the 'complete lifecycle' - action. - node_id UUID The cluster node ID to be terminated - lifecycle_transition_type string The type of lifecycle transition - ========================== ======= ======================================= - -Security impact ---------------- - -None - -Notifications impact --------------------- - -A new notification is sent to a specified Zaqar queue. - -Other end user impact ---------------------- - -The python-senlinclient requires modification to allow the user to perform -'complete lifecycle' action. - -Performance Impact ------------------- - -None - -Other deployer impact ---------------------- - -None - -Developer impact ----------------- - -The openstacksdk requires modification to add the new 'complete -lifecycle' API endpoint. - - -Implementation -============== - -Assignee(s) ------------ - -dtruong@blizzard.com - -Work Items ----------- - -None - -Dependencies -============ - -None - - -Testing -======= - -Tempest tests for the new API endpoint and policy will be added. - -Documentation Impact -==================== - -End User Guide needs to updated for new API endpoint, deletion policy changes -and behavior changes to scale-in, cluster-resize, cluster-node-del and -node-delete actions. - -References -========== - -None - - -History -======= - -None diff --git a/doc/specs/multiple-detection-modes.rst b/doc/specs/multiple-detection-modes.rst deleted file mode 100644 index 99a11ca33..000000000 --- a/doc/specs/multiple-detection-modes.rst +++ /dev/null @@ -1,317 +0,0 @@ -.. 
- This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================================= -Multiple polling detection modes in Health Policy -================================================= - -The health policy allows a user specify a detection mode to use for checking -node health. In the current implementation only one of the following detection -modes is allowed: - -* NODE_STATUS_POLLING -* NODE_STATUS_POLL_URL -* LIFECYCLE_EVENTS - -This spec proposes to let the user specify multiple polling detection modes in -the same health policy. E.g. the user can specify both NODE_STATUS_POLLING and -NODE_STATUS_POLL_URL detection modes in the same health policy. - - -Problem description -=================== - -The current implementation only allows a health policy to specify a single -detection mode to use for verifying the node health. However, there are -situations in which the user would want to have two detection modes checked and -only rebuild a node if both modes failed. Using multiple detection modes has the -benefit of fault tolerant health checks where one detection mode takes over in -case the other detection mode cannot be completed. - - -Use Cases ---------- - -As a user, I want to specify multiple polling detection modes for a given health -policy. The order of the polling detection modes used when creating the health -policy specifies the order of evaluation for the health checks. As a user, I also -want to be able to specify if a single detection mode failure triggers a node -rebuild or if all detection modes have to fail before a node is considered -unhealthy. - - -Proposed change -=============== - - -1. **Health Policy** - - Increment health policy version to 1.1 and implement the following schema: - -:: - - name: senlin.policy.health-1.1 - schema: - detection: - description: Policy aspect for node failure detection. - required: true - schema: - detection_modes: - description: List of node failure detection modes. - required: false - schema: - '*': - description: Node failure detection mode to try - required: false - schema: - options: - default: {} - required: false - schema: - poll_url: - default: '' - description: URL to poll for node status. See documentation for - valid expansion parameters. Only required when type is 'NODE_STATUS_POLL_URL'. - required: false - type: String - updatable: false - poll_url_conn_error_as_unhealthy: - default: true - description: Whether to treat URL connection errors as an indication - of an unhealthy node. Only required when type is 'NODE_STATUS_POLL_URL'. - required: false - type: Boolean - updatable: false - poll_url_healthy_response: - default: '' - description: String pattern in the poll URL response body that - indicates a healthy node. Required when type is 'NODE_STATUS_POLL_URL'. - required: false - type: String - updatable: false - poll_url_retry_interval: - default: 3 - description: Number of seconds between URL polling retries before - a node is considered down. Required when type is 'NODE_STATUS_POLL_URL'. - required: false - type: Integer - updatable: false - poll_url_retry_limit: - default: 3 - description: Number of times to retry URL polling when its return - body is missing POLL_URL_HEALTHY_RESPONSE string before a node - is considered down. Required when type is 'NODE_STATUS_POLL_URL'. 
- required: false - type: Integer - updatable: false - poll_url_ssl_verify: - default: true - description: Whether to verify SSL when calling URL to poll for - node status. Only required when type is 'NODE_STATUS_POLL_URL'. - required: false - type: Boolean - updatable: false - type: Map - updatable: false - type: - constraints: - - constraint: - - LIFECYCLE_EVENTS - - NODE_STATUS_POLLING - - NODE_STATUS_POLL_URL - type: AllowedValues - description: Type of node failure detection. - required: true - type: String - updatable: false - type: Map - updatable: false - type: List - updatable: false - interval: - default: 60 - description: Number of seconds between pollings. Only required when type is - 'NODE_STATUS_POLLING' or 'NODE_STATUS_POLL_URL'. - required: false - type: Integer - updatable: false - node_update_timeout: - default: 300 - description: Number of seconds since last node update to wait before checking - node health. - required: false - type: Integer - updatable: false - recovery_conditional: - constraints: - - constraint: - - ALL_FAILED - - ANY_FAILED - type: AllowedValues - default: ANY_FAILED - description: The conditional that determines when recovery should be performed - in case multiple detection modes are specified. 'ALL_FAILED' - means that all detection modes have to return failed health checks before - a node is recovered. 'ANY_FAILED' means that a failed health - check with a single detection mode triggers a node recovery. - required: false - type: String - updatable: false - type: Map - updatable: false - recovery: - description: Policy aspect for node failure recovery. - required: true - schema: - actions: - description: List of actions to try for node recovery. - required: false - schema: - '*': - description: Action to try for node recovery. - required: false - schema: - name: - constraints: - - constraint: - - REBOOT - - REBUILD - - RECREATE - type: AllowedValues - description: Name of action to execute. - required: true - type: String - updatable: false - params: - description: Parameters for the action - required: false - type: Map - updatable: false - type: Map - updatable: false - type: List - updatable: false - fencing: - description: List of services to be fenced. - required: false - schema: - '*': - constraints: - - constraint: - - COMPUTE - type: AllowedValues - description: Service to be fenced. - required: true - type: String - updatable: false - type: List - updatable: false - node_delete_timeout: - default: 20 - description: Number of seconds to wait for node deletion to finish and start - node creation for recreate recovery option. Required when type is 'NODE_STATUS_POLL_URL - and recovery action is RECREATE'. - required: false - type: Integer - updatable: false - node_force_recreate: - default: false - description: Whether to create node even if node deletion failed. Required - when type is 'NODE_STATUS_POLL_URL' and action recovery action is RECREATE. 
- required: false - type: Boolean - updatable: false - type: Map - updatable: false - - - -Alternatives ------------- - -None - - -Data model impact ------------------ - -None - -REST API impact ---------------- - -None - -Security impact ---------------- - -None - -Notifications impact --------------------- - -None - -Other end user impact ---------------------- - -None - -Performance Impact ------------------- - -None - -Other deployer impact ---------------------- - -None - -Developer impact ----------------- - -None - - -Implementation -============== - -Assignee(s) ------------ - -dtruong@blizzard.com - -Work Items ----------- - -None - -Dependencies -============ - -None - - -Testing -======= - -Unit tests and tempest tests are needed to test multiple detection modes. - -Documentation Impact -==================== - -End User Guide needs to be updated to describe how multiple detection modes can -be set. - -References -========== - -None - -History -======= - -None diff --git a/doc/specs/rejected/README.rst b/doc/specs/rejected/README.rst deleted file mode 100644 index 331271e32..000000000 --- a/doc/specs/rejected/README.rst +++ /dev/null @@ -1,2 +0,0 @@ -This directory holds the feature proposals that have been rejected. These -files are archived here for references. diff --git a/doc/specs/template.rst b/doc/specs/template.rst deleted file mode 100644 index c5f150ea1..000000000 --- a/doc/specs/template.rst +++ /dev/null @@ -1,363 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Example Spec - The title of your blueprint -========================================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/senlin/+spec/example - -Introduction paragraph -- why are we doing anything? A single paragraph of -prose that operators can understand. The title and this first paragraph -should be used as the subject line and body of the commit message -respectively. - -Some notes about the senlin spec and blueprint process: - -* Not all blueprints need a spec. A blueprint is primarily used for tracking - a series of changes which could be easy to implement and easy to review. - A spec, on the other hand, usually warrants a discussion among the - developers (and reviewers) before work gets started. - -* The aim of this document is first to define the problem we need to solve, - and second agree the overall approach to solve that problem. - -* This is not intended to be extensive documentation for a new feature. - For example, there is no need to specify the exact configuration changes, - nor the exact details of any DB model changes. But you should still define - that such changes are required, and be clear on how that will affect - upgrades. - -* You should aim to get your spec approved before writing your code. - While you are free to write prototypes and code before getting your spec - approved, its possible that the outcome of the spec review process leads - you towards a fundamentally different solution than you first envisaged. - -* API changes are held to a much higher level of scrutiny. As soon as an API - change merges, we must assume it could be in production somewhere, and as - such, we then need to support that API change forever. To avoid getting that - wrong, we do want lots of details about API changes upfront. - -Some notes about using this template: - -* Please wrap text at 79 columns. 
- -* The filename in the git repository should match the launchpad URL, for - example a URL of: https://blueprints.launchpad.net/senlin/+spec/some-thing - should be named ``some-thing.rst``. - -* Please do not delete any of the *sections* in this template. If you have - nothing to say for a whole section, just write: None - -* For help with syntax, see http://www.sphinx-doc.org/en/stable/rest.html - -* To test out your formatting, build the docs using tox and see the generated - HTML file in doc/build/html/specs/ - -* If you would like to provide a diagram with your spec, ascii diagrams are - required. http://asciiflow.com/ is a very nice tool to assist with making - ascii diagrams. The reason for this is that the tool used to review specs is - based purely on plain text. Plain text will allow review to proceed without - having to look at additional files which can not be viewed in gerrit. It - will also allow inline feedback on the diagram itself. - -* If your specification proposes any changes to the Nova REST API such as - changing parameters which can be returned or accepted, or even the semantics - of what happens when a client calls into the API, then you should add the - ``APIImpact`` flag to the commit message. Specs and patches with the - ``APIImpact`` flag can be found with the following query: - - https://review.openstack.org/#/q/status:open+project:openstack/senlin+message:apiimpact,n,z - - -Problem description -=================== - -A detailed description of the problem. What problem is this spec addressing? - -Use Cases ---------- - -What use cases does this address? -What are the impacts on actors (developer, end user, deployer etc.)? - -Proposed change -=============== - -Detail here the changes you propose to make with the scope clearly defined. - -At this point, if you would like to just get feedback on if the problem and -proposed change fit in senlin, you can stop here and post this for review to -get early feedback. - -Alternatives ------------- - -What are the other ways we could do this? Why aren't we using those? - -This doesn't have to be a full literature review, but it should demonstrate -that thought has been put into why the proposed solution is an appropriate one. - -Data model impact ------------------ - -What are the new data objects and/or database schema changes, if any? - -What database migrations will accompany this change? - -How will the initial set of new data objects be generated? -For example if you need to consider the existing resources or modify other -existing data, describe how that will work. - -REST API impact ---------------- - -For each API added/changed, clarify the followings: - -* Method Specification - - - A description of what the method does, suitable for use in user doc; - - - Method type (POST/PUT/PATCH/GET/DELETE) - - - Normal http response code(s) - - - Expected error http response code(s) - - + A description for each possible error code should be included describing - semantic errors which can cause it such as inconsistent parameters - supplied to the method, or when an object is not in an appropriate state - for the request to succeed. Errors caused by syntactic problems covered - by the JSON schema definition do not need to be included. - - - URL for the resource - - + URL should not include underscores, and use hyphens instead. 
- - - Parameters which can be passed via the URL - - - Request body definition in JSON schema, if any, with sample - - * Field names should use snake_case style, not CamelCase - - - Response body definition in JSON schema, if any, with sample - - * Field names should use snake_case style, not CamelCase - -* Policy changes to be introduced - - - Other things a deployer needs to think about when defining their policy. - -Note that the request/response schema should be defined as restrictively as -possible. Parameters which are required should be marked as such and only -under exceptional circumstances should additional parameters which are not -defined in the schema be permitted. - -Reuse of existing predefined parameter types such as regexps for passwords and -user defined names is highly encouraged. - -Security impact ---------------- - -Describe any potential security impact on the system. Some of the items to -consider include: - -* Does this change touch sensitive data such as tokens, keys, or user data? - -* Does this change alter the API in a way that may impact security, such as - a new way to access sensitive information or a new way to login? - -* Does this change involve cryptography or hashing? - -* Does this change require the use of sudo or any elevated privileges? - -* Does this change involve using or parsing user-provided data? This could - be directly at the API level or indirectly such as changes to a cache layer. - -* Can this change enable a resource exhaustion attack, such as allowing a - single API interaction to consume significant server resources? Examples - of this include launching subprocesses for each connection, or entity - expansion attacks in XML. - -For more detailed guidance, please see the OpenStack Security Guidelines as -a reference (https://wiki.openstack.org/wiki/Security/Guidelines). These -guidelines are a work in progress and are designed to help you identify -security best practices. For further information, feel free to reach out -to the OpenStack Security Group at openstack-security@lists.openstack.org. - -Notifications impact --------------------- - -Please specify any changes to notifications, including: - -- adding new notification, -- changing an existing notification, or -- removing a notification. - -Other end user impact ---------------------- - -Aside from the API, are there other ways a user will interact with this -feature? - -* Does this change have an impact on python-senlinclient? - -* What does the user interface there look like? - -Performance Impact ------------------- - -Describe any potential performance impact on the system, for example -how often will new code be called, and is there a major change to the calling -pattern of existing code. - -Examples of things to consider here include: - -* A periodic task manipulating a cluster node implies workload which will be - multiplied by the size of a cluster. - -* Any code interacting with backend services (e.g. nova or heat) may introduce - some latency which linear to the size of a cluster. - -* A small change in a utility function or a commonly used decorator can have a - large impacts on performance. - -* Calls which result in a database queries can have a profound impact on - performance when called in critical sections of the code. - -* Will the change include any locking, and if so what considerations are there - on holding the lock? 
- -Other deployer impact ---------------------- - -Other impacts on how you deploy and configure OpenStack, such as: - -* What config options are being added? Should they be more generic than - proposed? Will the default values work well in real deployments? - -* Is this a change that takes immediate effect after its merged, or is it - something that has to be explicitly enabled? - -* If this change involves a new binary, how would it be deployed? - -* Please state anything that those doing continuous deployment, or those - upgrading from the previous release, need to be aware of. Also describe - any plans to deprecate configuration values or features. - -Developer impact ----------------- - -Discuss things that will affect other developers, such as: - -* If the blueprint proposes a change to the driver API, discussion of how - other drivers would implement the feature is required. - -* Does this change have an impact on openstacksdk? - - -Implementation -============== - -Assignee(s) ------------ - -Who is leading the writing of the code? Or is this a blueprint where you're -throwing it out there to see who picks it up? - -If more than one person is working on the implementation, please designate the -primary author and contact. - -Primary assignee: - - -Other contributors: - - -Work Items ----------- - -Work items or tasks -- break the feature up into the things that need to be -done to implement it. Those parts might end up being done by different people, -but we're mostly trying to understand the timeline for implementation. - - -Dependencies -============ - -* Include specific references to specs and/or blueprints, or in other - projects, that this one either depends on or is related to. - -* If this requires functionality of another project that is not currently - used by senlin, document that fact. - -* Does this feature require any new library dependencies or code otherwise - not included in OpenStack? Or does it depend on a specific version of - library? - - -Testing -======= - -Please discuss how the change will be tested, especially what tempest tests -will be added. It is assumed that unit test coverage will be added so that -doesn't need to be mentioned explicitly, but discussion of why you think -unit tests are sufficient and we don't need to add more tempest tests would -need to be included. - -Please discuss the important scenarios needed to test here, as well as -specific edge cases we should be ensuring work correctly. For each -scenario please specify if this requires a full openstack environment, or -can be simulated inside the senlin tree. - - -Documentation Impact -==================== - -Which audiences are affected most by this change, and which documentation -titles on docs.openstack.org should be updated because of this change? - -Don't repeat details discussed above, but reference them here in the context of -documentation for multiple audiences. For example, the Operations Guide targets -cloud operators, and the End User Guide would need to be updated if the change -offers a new feature available through the CLI or dashboard. If a config option -changes or is deprecated, note here that the documentation needs to be updated -to reflect this specification's change. - -References -========== - -Please add any useful references here. You are not required to have any -reference. Moreover, this specification should still make sense when your -references are unavailable. 
Examples of what you could include are: - -* Links to mailing list or IRC discussions - -* Links to notes from a summit session - -* Links to relevant research, if appropriate - -* Related specifications as appropriate - -* Anything else you feel it is worthwhile to refer to - - -History ======= - -Optional section intended to be used each time the spec is updated, to describe -a new design, API change or database schema update. Useful to let readers -understand what has happened over time. - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Ocata - - Introduced diff --git a/doc/specs/workflow-recover.rst b/doc/specs/workflow-recover.rst deleted file mode 100644 index 7616787f3..000000000 --- a/doc/specs/workflow-recover.rst +++ /dev/null @@ -1,172 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Support Workflow Service as Recover Action -========================================== - - -Senlin today supports many different actions for cluster management. For the -auto-healing use case in particular, Senlin provides check -and recover actions to support a customizable healing loop driven by the health -policy, where three detection types can be chosen: NODE_STATUS_POLLING, -LB_STATUS_POLLING, VM_LIFECYCLE_EVENTS. Once a failure of the given type is -detected, the recover action can be executed automatically or manually. Also in -the health policy, users can define a list of actions under the recovery -category, which are applied in order to a failed node. - -Some simple recover actions, like rebuild or recreate, can be embedded into -Senlin, but some complex actions are a chain of simple actions. For example, -evacuating a VM server requires verifying that the targeted node can be -evacuated, then executing the action, and often confirming whether the action -succeeded. To support these cases, this spec aims to extend Senlin -to integrate with the Mistral workflow service so as to trigger user-defined -workflows as recover options. - -Problem description =================== - -This spec extends Senlin to support Mistral workflows for more complex -and customizable recover actions. - -Use Cases --------- - -One typical use case is to allow users to introduce their own or an existing -Mistral workflow as a recover action option, or as special processing before -or after a given recover action. - -Proposed change =============== - -The proposed change includes the following parts: -* driver: to add Mistral support to Senlin. -* profile: to add workflow support as one of the recover actions. -* cloud/node_action: to support a chain of actions defined as the recover behaviour. -* health policy: The health policy spec will be changed to support workflow as - the recover action and to include the parameters needed to execute - the workflow. In the health policy, the workflow can also be - executed before or after some defined recover action. - Below is an example: - - recovery: - actions: - - name: REBUILD - - name: WORKFLOW - params: - workflow_name: node_migration - inputs: - host: Target_host - -* example: to add sample workflow definitions and a health policy for Senlin - users to create an end-to-end story (a workflow sketch is given below).
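To illustrate what the ``node_migration`` workflow referenced in the health
policy example above could look like, here is a minimal Mistral v2 sketch. The
``std.echo`` actions are placeholders standing in for real verify, migrate and
confirm actions chosen by the workflow author; nothing below is prescribed by
this spec::

    version: '2.0'

    node_migration:
      description: Hypothetical recover workflow (verify -> migrate -> confirm).
      input:
        - host
      tasks:
        verify_target:
          # placeholder for checking that <% $.host %> can accept the node
          action: std.echo output="verifying <% $.host %>"
          on-success:
            - migrate_node
        migrate_node:
          # placeholder for the actual migration/evacuation action
          action: std.echo output="migrating to <% $.host %>"
          on-success:
            - confirm
        confirm:
          # placeholder for confirming that the action succeeded
          action: std.echo output="confirmed"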
- -Alternatives ------------ - -None - -Data model impact ----------------- - -None - -REST API impact --------------- - -None - -Security impact --------------- - -None - -Notifications impact ------------------- - -None in the first version - -Other end user impact ------------------- - -None - -Performance Impact ---------------- - -None - -Other deployer impact ------------------- - -If Mistral is installed in the same environment and users want to leverage its -workflow functions, this spec provides the support needed to integrate Senlin and -Mistral for auto-healing purposes. - -One thing worth noting is that debugging and troubleshooting of user workflows -are not in the scope of this integration. This spec aims to provide a channel for users -to bring their own trusted workflows into the Senlin auto-healing loop and work together -with all the embedded actions. - -Developer impact -------------- - -None - -Implementation ============== - -Assignee(s) ----------- - -lxinhui@vmware.com - -Work Items ---------- - -The primary work items in Senlin will focus on adding a new driver for Mistral and -implementing do_recover in the profile. - -Dependencies ============ - -* Mistral: needs to migrate the current APIs to versioned ones. - -* Openstacksdk: needs to support the workflow service. - - -Testing ======= - -Unit tests will be provided. End-to-end tests will be provided as examples for Senlin -users. - - -Documentation Impact ==================== - -None - -References ========== - -[1] Mistral patch about API migration: - https://review.openstack.org/414755 -[2] Openstacksdk patch about the support of the Mistral service: - https://review.openstack.org/414919 - -History ======= - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Ocata - - Introduced diff --git a/etc/senlin/README-senlin.conf.txt b/etc/senlin/README-senlin.conf.txt deleted file mode 100644 index 44bb0d68d..000000000 --- a/etc/senlin/README-senlin.conf.txt +++ /dev/null @@ -1,4 +0,0 @@ -To generate the sample senlin.conf file, run the following -command from the top level of the senlin directory: - -tox -egenconfig diff --git a/etc/senlin/api-paste.ini b/etc/senlin/api-paste.ini deleted file mode 100644 index e3a5327c1..000000000 --- a/etc/senlin/api-paste.ini +++ /dev/null @@ -1,48 +0,0 @@ - -# senlin-api pipeline -[pipeline:senlin-api] -pipeline = cors http_proxy_to_wsgi request_id faultwrap versionnegotiation osprofiler webhook authtoken context trust apiv1app - -[app:apiv1app] -paste.app_factory = senlin.api.common.wsgi:app_factory -senlin.app_factory = senlin.api.openstack.v1.router:API - -# Middleware to set x-openstack-request-id in http response header -[filter:request_id] -paste.filter_factory = oslo_middleware.request_id:RequestId.factory - -[filter:faultwrap] -paste.filter_factory = senlin.api.common.wsgi:filter_factory -senlin.filter_factory = senlin.api.middleware:fault_filter - -[filter:context] -paste.filter_factory = senlin.api.common.wsgi:filter_factory -senlin.filter_factory = senlin.api.middleware:context_filter -oslo_config_project = senlin - -[filter:versionnegotiation] -paste.filter_factory = senlin.api.common.wsgi:filter_factory -senlin.filter_factory = senlin.api.middleware:version_filter - -[filter:trust] -paste.filter_factory = senlin.api.common.wsgi:filter_factory -senlin.filter_factory = senlin.api.middleware:trust_filter - -[filter:webhook] -paste.filter_factory = 
senlin.api.common.wsgi:filter_factory -senlin.filter_factory = senlin.api.middleware:webhook_filter - -[filter:http_proxy_to_wsgi] -paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory -oslo_config_project = senlin - -# Auth middleware that validates token against keystone -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory - -[filter:osprofiler] -paste.filter_factory = osprofiler.web:WsgiMiddleware.factory - -[filter:cors] -paste.filter_factory = oslo_middleware.cors:filter_factory -oslo_config_project = senlin diff --git a/examples/policies/WIP/batching_1_1_0.yaml b/examples/policies/WIP/batching_1_1_0.yaml deleted file mode 100644 index 70c43b7cf..000000000 --- a/examples/policies/WIP/batching_1_1_0.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Sample batching policy -type: senlin.policy.batching -version: 1.0 -description: A policy for generating batches for cluster operations. -properties: - # Min number of nodes in service when doing cluster-wide operations - min_in_service: 1 - - # Max number of nodes that can be operated simultaneously - max_batch_size: 1 - - # Number of seconds between batches - pause_time: 0 diff --git a/examples/policies/WIP/health_policy_lb.yaml b/examples/policies/WIP/health_policy_lb.yaml deleted file mode 100644 index 26c227dd1..000000000 --- a/examples/policies/WIP/health_policy_lb.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# Sample health policy based on monitoring using LBaaS service -type: senlin.policy.health -version: 1.0 -description: A policy for maintaining node health from a cluster. -properties: - detection: - # Type for health checking, valid values include: - # NODE_STATUS_POLLING, LB_STATUS_POLLING, VM_EVENT_LISTENING - type: LB_STATUS_POLLING - - # Detailed specification for the checking type - options: - # Min time in seconds between regular connections of the member - delay: 5 - - # Max time in seconds for a monitor to wait for a connection - # to establish before it times out - timeout: 10 - - # Predefined health monitor types, valid values include one of: - # PING, TCP, HTTP, HTTPS - type: HTTP - - # Number of permissible connection failures before changing the - # node status to INACTIVE - max_retries: 3 - - # HTTP method used for requests by the monitor of type HTTP - http_method: GET - - # List of HTTP status codes expected in response from a member - # to declare it healthy - expected_codes: [200] - - # HTTP path used in HTTP request by monitor for health testing - url_path: /health_status - - recovery: - # List of actions that can be retried on a failed node - actions: - - REBOOT - - REBUILD - - MIGRATE - - EVACUATE - - RECREATE - - # List of services that are to be fenced - fencing: - - COMPUTE - - STORAGE - - NETWORK diff --git a/examples/policies/WIP/lb_policy_aws.spec b/examples/policies/WIP/lb_policy_aws.spec deleted file mode 100644 index 78d53bfdb..000000000 --- a/examples/policies/WIP/lb_policy_aws.spec +++ /dev/null @@ -1,21 +0,0 @@ -# Sample load-balancing policy modeled after AWS ELB load-balancer - -# TODO(Qiming): Rework this based on ELB spec -AvailabilityZones: [] -Instances: [] -Listeners: - - InstancePort: 80 - LoadBalancerPort: 80 - Protocol: HTTP - SSLCertificateId: MyCertificate - PolicyNames: - - PolicyA - - PolicyB -AppCookieStickinessPolicy: - - What -LBCookieStickinessPolicy: - - What -SecurityGroups: - - ssh_group -Subnets: - - private_sub_net_01 diff --git a/examples/policies/affinity_policy.yaml b/examples/policies/affinity_policy.yaml deleted file mode
100644 index cfa838d6c..000000000 --- a/examples/policies/affinity_policy.yaml +++ /dev/null @@ -1,8 +0,0 @@ -type: senlin.policy.affinity -version: 1.0 -properties: - servergroup: - name: web_servers - policies: anti-affinity - availability_zone: az01 - enable_drs_extension: false diff --git a/examples/policies/batch_policy.yaml b/examples/policies/batch_policy.yaml deleted file mode 100644 index dd2b9e594..000000000 --- a/examples/policies/batch_policy.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Sample batch policy that can be attached to a cluster -type: senlin.policy.batch -version: 1.0 -properties: - # Minimum number of nodes that should remain in service when - # performing actions like CLUSTER_UPDATE. - min_in_service: 1 - - # Maximum number of nodes that can be processed at the - # same time. - max_batch_size: 2 - - # Number of seconds between two consecutive batches of - # operations. A value of 0 means no pause time. - pause_time: 3 diff --git a/examples/policies/deletion_policy.yaml b/examples/policies/deletion_policy.yaml deleted file mode 100644 index 464162e35..000000000 --- a/examples/policies/deletion_policy.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Sample deletion policy that can be attached to a cluster. -type: senlin.policy.deletion -version: 1.0 -description: A policy for choosing victim node(s) from a cluster for deletion. -properties: - # The valid values include: - # OLDEST_FIRST, OLDEST_PROFILE_FIRST, YOUNGEST_FIRST, RANDOM - criteria: OLDEST_FIRST - - # Whether deleted node should be destroyed - destroy_after_deletion: True - - # Length in number of seconds before the actual deletion happens - # This param buys an instance some time before deletion - grace_period: 60 - - # Whether the deletion will reduce the desired capacity of - # the cluster as well - reduce_desired_capacity: False diff --git a/examples/policies/deletion_policy_lifecycle_hook.yaml b/examples/policies/deletion_policy_lifecycle_hook.yaml deleted file mode 100644 index 4f5eea948..000000000 --- a/examples/policies/deletion_policy_lifecycle_hook.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Sample deletion policy that can be attached to a cluster. -type: senlin.policy.deletion -version: 1.1 -description: A policy for choosing victim node(s) from a cluster for deletion. -properties: - hooks: - # type of lifecycle hook - type: zaqar - params: - # Name of zaqar queue to receive lifecycle hook message - queue: zaqar_queue_name - # Length in number of seconds before the actual deletion happens - timeout: 180 - diff --git a/examples/policies/health_policy_event.yaml b/examples/policies/health_policy_event.yaml deleted file mode 100644 index 4e0bcf27e..000000000 --- a/examples/policies/health_policy_event.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Sample health policy based on VM lifecycle events -type: senlin.policy.health -version: 1.1 -description: A policy for maintaining node health from a cluster. -properties: - detection: - detection_modes: - # Type for health checking, valid values include: - # NODE_STATUS_POLLING, NODE_STATUS_POLL_URL, LIFECYCLE_EVENTS - - type: LIFECYCLE_EVENTS - - recovery: - # Action that can be retried on a failed node, will improve to - # support multiple actions in the future. 
Valid values include: - # REBOOT, REBUILD, RECREATE - actions: - - name: RECREATE diff --git a/examples/policies/health_policy_poll.yaml b/examples/policies/health_policy_poll.yaml deleted file mode 100644 index 85b207330..000000000 --- a/examples/policies/health_policy_poll.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Sample health policy based on node health checking -type: senlin.policy.health -version: 1.1 -description: A policy for maintaining node health from a cluster. -properties: - detection: - # Number of seconds between two adjacent checking - interval: 600 - - detection_modes: - # Type for health checking, valid values include: - # NODE_STATUS_POLLING, NODE_STATUS_POLL_URL, LIFECYCLE_EVENTS - - type: NODE_STATUS_POLLING - - recovery: - # Action that can be retried on a failed node, will improve to - # support multiple actions in the future. Valid values include: - # REBOOT, REBUILD, RECREATE - actions: - - name: RECREATE diff --git a/examples/policies/health_policy_poll_url.yaml b/examples/policies/health_policy_poll_url.yaml deleted file mode 100644 index 7ff91fd6d..000000000 --- a/examples/policies/health_policy_poll_url.yaml +++ /dev/null @@ -1,19 +0,0 @@ -type: senlin.policy.health -version: 1.1 -description: A policy for maintaining node health by polling a URL -properties: - detection: - interval: 120 - node_update_timeout: 240 - detection_modes: - - type: NODE_STATUS_POLL_URL - options: - poll_url: "http://myhealthservice/health/node/{nodename}" - poll_url_healthy_response: "passing" - poll_url_retry_limit: 3 - poll_url_retry_interval: 2 - recovery: - actions: - - name: RECREATE - node_delete_timeout: 90 - node_force_recreate: True diff --git a/examples/policies/lb_policy.yaml b/examples/policies/lb_policy.yaml deleted file mode 100644 index 06e1bcfbb..000000000 --- a/examples/policies/lb_policy.yaml +++ /dev/null @@ -1,80 +0,0 @@ -# Load-balancing policy spec using Neutron LBaaS service -type: senlin.policy.loadbalance -version: 1.1 -description: A policy for load-balancing the nodes in a cluster. -properties: - pool: - # Protocol used for load balancing - protocol: HTTP - - # Port on which servers are running on the members - protocol_port: 80 - - # Name or ID of subnet for the port on which members can be - # connected. - subnet: private-subnet - - # Valid values include: ROUND_ROBIN, LEAST_CONNECTIONS, SOURCE_IP - lb_method: ROUND_ROBIN - - session_persistence: - # type of session persistence, valid values include: - # SOURCE_IP, HTTP_COOKIE, APP_COOKIE, NONE - type: SOURCE_IP - # Name of cookie if type set to APP_COOKIE - cookie_name: whatever - - # ID of pool for the cluster on which nodes can be connected. - # id: - - vip: - # Name or ID of Subnet on which VIP address will be allocated - subnet: public-subnet - - # IP address of the VIP - # address:
- - # Max #connections per second allowed for this VIP - connection_limit: 500 - - # Protocol used for VIP - protocol: HTTP - - # TCP port to listen on - protocol_port: 80 - - health_monitor: - # The type of probe sent by the load balancer to verify the member state, - # can be PING, TCP, HTTP, or HTTPS. - type: 'PING' - - # The amount of time, in milliseconds, between sending probes to members. - delay: 10000 - - # The maximum time in milliseconds that a monitor waits to connect before - # it times out. This value must be less than the delay value. - timeout: 5000 - - # The number of allowed connection failures before changing the status - # of the member to INACTIVE. A valid value is from 1 to 10. - max_retries: 4 - - # The HTTP method that the monitor uses for requests. - http_method: 'GET' - - # The HTTP path of the request sent by the monitor to test the health of - # a member. A string value that must begin with the forward slash '/'. - url_path: '/index.html' - - # Expected HTTP codes for a passing HTTP(S) monitor. - expected_codes: '200, 202' - - # ID of the health manager for the loadbalancer. - # id: - - # Time in second to wait for loadbalancer to become ready before and after - # senlin requests lbaas V2 service for lb operations. - lb_status_timeout: 300 - - # Name or ID of loadbalancer for the cluster on which nodes can be connected. - # loadbalancer: diff --git a/examples/policies/placement_region.yaml b/examples/policies/placement_region.yaml deleted file mode 100644 index 823c0d074..000000000 --- a/examples/policies/placement_region.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# Sample placement policy for cross-region placement -type: senlin.policy.region_placement -version: 1.0 -description: A policy for node placement across regions -properties: - regions: - - name: RegionOne - weight: 100 - cap: 150 - - name: RegionTwo - weight: 100 - cap: 200 diff --git a/examples/policies/placement_zone.yaml b/examples/policies/placement_zone.yaml deleted file mode 100644 index 194ef01cb..000000000 --- a/examples/policies/placement_zone.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# Sample placement policy for cross-availability-zone placement -type: senlin.policy.zone_placement -version: 1.0 -description: A policy for node placement across availability zones -properties: - zones: - - name: zone1 - weight: 100 - - name: zone2 - weight: 100 diff --git a/examples/policies/scaling_policy.yaml b/examples/policies/scaling_policy.yaml deleted file mode 100644 index 3e87a1521..000000000 --- a/examples/policies/scaling_policy.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Sample scaling policy that can be attached to a cluster -type: senlin.policy.scaling -version: 1.0 -properties: - event: CLUSTER_SCALE_IN - adjustment: - # Adjustment type, valid values include: - # EXACT_CAPACITY, CHANGE_IN_CAPACITY, CHANGE_IN_PERCENTAGE - type: CHANGE_IN_CAPACITY - - # A number that will be interpreted based on the type setting. - number: 1 - - # When type is set CHANGE_IN_PERCENTAGE, min_step specifies - # that the cluster size will be changed by at least the number - # of nodes specified here. - min_step: 1 - - # When scaling operation will break the size limitation of - # cluster, whether to do best effort scaling, e.g. decrease - # cluster size to min_size or increase cluster size to max_size - # Default False means reject scaling request directly. - best_effort: True - - # Number of seconds before allowing the cluster to be resized again. 
- cooldown: 120 diff --git a/examples/profiles/README.rst b/examples/profiles/README.rst deleted file mode 100644 index 15d06e5be..000000000 --- a/examples/profiles/README.rst +++ /dev/null @@ -1,28 +0,0 @@ -How To Use the Sample Spec File -=============================== - -This directory contains sample spec files that can be used to create a Senlin -profile using :command:`openstack cluster profile create` command, for example: - -To create an os.nova.server profile:: - - $ cd ./nova_server - $ openstack cluster profile create --spec-file cirros_basic.yaml my_server - -To create an os.heat.stack profile:: - - $ cd ./heat_stack/nova_server - $ openstack cluster profile create --spec-file heat_stack_nova_server.yaml my_stack - -To create a container.dockerinc.docker profile:: - - $ cd ./docker_container - $ openstack cluster profile create --spec-file docker_basic.yaml my_docker - -To get help on the command line options for creating profiles:: - - $ openstack help cluster profile create - -To show the profile created:: - - $ openstack cluster profile show diff --git a/examples/profiles/docker_container/docker_basic.yaml b/examples/profiles/docker_container/docker_basic.yaml deleted file mode 100644 index c19171c05..000000000 --- a/examples/profiles/docker_container/docker_basic.yaml +++ /dev/null @@ -1,11 +0,0 @@ -type: container.dockerinc.docker -version: 1.0 -properties: - #name: docker_container - image: hello-world - command: '/bin/sleep 30' - host_node: 58736d36-271a-47e7-816d-fb7927a7cd95 - host_cluster: b3283baf-c199-49fc-a5b7-f2b301b15a3d - port: 2375 - context: - region_name: RegionOne diff --git a/examples/profiles/heat_stack/nova_server/heat_stack_nova_server.yaml b/examples/profiles/heat_stack/nova_server/heat_stack_nova_server.yaml deleted file mode 100644 index 19e1c4064..000000000 --- a/examples/profiles/heat_stack/nova_server/heat_stack_nova_server.yaml +++ /dev/null @@ -1,9 +0,0 @@ -type: os.heat.stack -version: 1.0 -properties: - name: nova_server_stack - template: nova_server_template.yaml - parameters: - server_name: my_cirros_server - context: - region_name: RegionOne diff --git a/examples/profiles/heat_stack/nova_server/nova_server_template.yaml b/examples/profiles/heat_stack/nova_server/nova_server_template.yaml deleted file mode 100644 index fadc24aff..000000000 --- a/examples/profiles/heat_stack/nova_server/nova_server_template.yaml +++ /dev/null @@ -1,56 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - A HOT template that holds a VM instance with a Neutron port created in - given private network and a floatingIP created in given external network. - -parameters: - server_name: - type: string - description: Name for the instance to be created - default: my_server - flavor: - type: string - description: Flavor for the instance to be created - default: m1.tiny - image: - type: string - description: Name or ID of the image to use for the instance. 
- default: cirros-0.3.5-x86_64-disk - public_net: - type: string - description: ID or name of public network where floating IP to be created - default: public - private_net: - type: string - description: ID or name of private network into which servers get deployed - default: private - -resources: - my_server: - type: OS::Nova::Server - properties: - name: { get_param: server_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: - - port: { get_resource: server_port } - - server_port: - type: OS::Neutron::Port - properties: - network: { get_param: private_net } - - server_floating_ip: - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: public_net } - port_id: { get_resource: server_port } - -outputs: - server_private_ip: - description: IP address of my_server in private network - value: { get_attr: [ server_port, fixed_ips, 0, ip_address ] } - server_public_ip: - description: Floating IP address of my_server in public network - value: { get_attr: [ server_floating_ip, floating_ip_address ] } diff --git a/examples/profiles/heat_stack/random_string/heat_stack_random_string.yaml b/examples/profiles/heat_stack/random_string/heat_stack_random_string.yaml deleted file mode 100644 index d6cc8880b..000000000 --- a/examples/profiles/heat_stack/random_string/heat_stack_random_string.yaml +++ /dev/null @@ -1,7 +0,0 @@ -type: os.heat.stack -version: 1.0 -properties: - name: random_string_stack - template: random_string_template.yaml - context: - region_name: RegionOne diff --git a/examples/profiles/heat_stack/random_string/random_string_template.yaml b/examples/profiles/heat_stack/random_string/random_string_template.yaml deleted file mode 100644 index d146a7498..000000000 --- a/examples/profiles/heat_stack/random_string/random_string_template.yaml +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 -parameters: - str_length: - type: number - default: 64 -resources: - random: - type: OS::Heat::RandomString - properties: - length: {get_param: str_length} -outputs: - result: - value: {get_attr: [random, value]} diff --git a/examples/profiles/nova_server/cirros_basic.yaml b/examples/profiles/nova_server/cirros_basic.yaml deleted file mode 100644 index 125353b53..000000000 --- a/examples/profiles/nova_server/cirros_basic.yaml +++ /dev/null @@ -1,14 +0,0 @@ -type: os.nova.server -version: 1.0 -properties: - name: cirros_server - flavor: 1 - image: "cirros-0.4.0-x86_64-disk" - key_name: oskey - networks: - - network: private - metadata: - test_key: test_value - user_data: | - #!/bin/sh - echo 'hello, world' > /tmp/test_file diff --git a/install.sh b/install.sh deleted file mode 100755 index 4b3b99b7d..000000000 --- a/install.sh +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/bash - -if [[ $EUID -ne 0 ]]; then - echo "This script must be run as root" >&2 - exit 1 -fi - -# Install prefix for config files (e.g. "/usr/local"). -# Leave empty to install into /etc -CONF_PREFIX="" -LOG_DIR=/var/log/senlin - - -install -d $LOG_DIR - -detect_rabbit() { - PKG_CMD="rpm -q" - RABBIT_PKG="rabbitmq-server" - QPID_PKG="qpid-cpp-server" - - # Detect OS type - # Ubuntu has an lsb_release command which allows us to detect if it is Ubuntu - if lsb_release -i 2>/dev/null | grep -iq ubuntu - then - PKG_CMD="dpkg -s" - QPID_PKG="qpidd" - fi - if $PKG_CMD $RABBIT_PKG > /dev/null 2>&1 - then - if ! 
$PKG_CMD $QPID_PKG > /dev/null 2>&1
-        then
-            return 0
-        fi
-    fi
-    return 1
-}
-
-# Determine whether the given option is present in the INI file
-# ini_has_option config-file section option
-function ini_has_option() {
-    local file=$1
-    local section=$2
-    local option=$3
-    local line
-    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
-    [ -n "$line" ]
-}
-
-# Set an option in an INI file
-# iniset config-file section option value
-function iniset() {
-    local file=$1
-    local section=$2
-    local option=$3
-    local value=$4
-    if ! grep -q "^\[$section\]" "$file"; then
-        # Add section at the end
-        echo -e "\n[$section]" >>"$file"
-    fi
-    if ! ini_has_option "$file" "$section" "$option"; then
-        # Add it
-        sed -i -e "/^\[$section\]/ a\\
-$option = $value
-" "$file"
-    else
-        # Replace it
-        sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file"
-    fi
-}
-
-basic_configuration() {
-    conf_path=$1
-    if echo $conf_path | grep ".conf$" >/dev/null 2>&1
-    then
-        # Use the conf_path argument rather than the caller's $target global
-        iniset $conf_path DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random`
-        iniset $conf_path database connection "mysql+pymysql://senlin:senlin@localhost/senlin?charset=utf8"
-
-        BRIDGE_IP=127.0.0.1
-
-        if detect_rabbit
-        then
-            echo "rabbitmq detected, configuring $conf_path for rabbit" >&2
-            iniset $conf_path DEFAULT rpc_backend kombu
-            iniset $conf_path oslo_messaging_rabbit rabbit_password guest
-        else
-            echo "qpid detected, configuring $conf_path for qpid" >&2
-            iniset $conf_path DEFAULT rpc_backend qpid
-        fi
-    fi
-}
-
-install_dir() {
-    local dir=$1
-    local prefix=$2
-
-    for fn in $(ls $dir); do
-        f=$dir/$fn
-        target=$prefix/$f
-        if [ $fn = 'senlin.conf.sample' ]; then
-            target=$prefix/$dir/senlin.conf
-        fi
-        if [ -d $f ]; then
-            [ -d $target ] || install -d $target
-            install_dir $f $prefix
-        elif [ -f $target ]; then
-            echo "NOT replacing existing config file $target" >&2
-            diff -u $target $f
-        else
-            echo "Installing $fn in $prefix/$dir" >&2
-            install -m 664 $f $target
-            if [ $fn = 'senlin.conf.sample' ]; then
-                basic_configuration $target
-            fi
-        fi
-    done
-}
-
-install_dir etc $CONF_PREFIX
-
-python setup.py install >/dev/null
-rm -rf build senlin.egg-info
diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder
deleted file mode 100644
index e69de29bb..000000000
diff --git a/releasenotes/notes/Switch-to-alembic-migrations-f442d0b58c3f13a6.yaml b/releasenotes/notes/Switch-to-alembic-migrations-f442d0b58c3f13a6.yaml
deleted file mode 100644
index 23cd57c2c..000000000
--- a/releasenotes/notes/Switch-to-alembic-migrations-f442d0b58c3f13a6.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-other:
-  - |
-    Senlin will now use Alembic migrations for database schema updates.
diff --git a/releasenotes/notes/Updated-for-SQLAlchemy-2.x-ee6831e5a95d3658.yaml b/releasenotes/notes/Updated-for-SQLAlchemy-2.x-ee6831e5a95d3658.yaml
deleted file mode 100644
index 82d8b96f9..000000000
--- a/releasenotes/notes/Updated-for-SQLAlchemy-2.x-ee6831e5a95d3658.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - |
-    Fixed compatibility issues with SQLAlchemy 2.x.
diff --git a/releasenotes/notes/acess-control-admin-project-762c8e91e8875738.yaml b/releasenotes/notes/acess-control-admin-project-762c8e91e8875738.yaml
deleted file mode 100644
index 648378db3..000000000
--- a/releasenotes/notes/acess-control-admin-project-762c8e91e8875738.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - |
-    Admin users can now see the details of any cluster profile.
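For reference, the ``iniset`` helper defined in ``install.sh`` above can be used
directly to adjust a generated config; a minimal sketch (the file path and the
values shown are hypothetical)::

    # Point senlin at its database, then pick a messaging backend
    iniset /etc/senlin/senlin.conf database connection \
        "mysql+pymysql://senlin:senlin@localhost/senlin?charset=utf8"
    iniset /etc/senlin/senlin.conf DEFAULT rpc_backend kombu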
diff --git a/releasenotes/notes/action-policy-optimization-06ea45eb3dcbe33a.yaml b/releasenotes/notes/action-policy-optimization-06ea45eb3dcbe33a.yaml
deleted file mode 100644
index 6861ebbd2..000000000
--- a/releasenotes/notes/action-policy-optimization-06ea45eb3dcbe33a.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-other:
-  - The retrieval of some resources such as actions and policies is optimized
-    to avoid object instantiation.
diff --git a/releasenotes/notes/action-purge-11db5d8018b8389a.yaml b/releasenotes/notes/action-purge-11db5d8018b8389a.yaml
deleted file mode 100644
index 66dfec49a..000000000
--- a/releasenotes/notes/action-purge-11db5d8018b8389a.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - An ``action_purge`` subcommand is added to the ``senlin-manage`` tool for purging actions
-    from the actions table.
diff --git a/releasenotes/notes/action-update-api-fc51b1582c0b5902.yaml b/releasenotes/notes/action-update-api-fc51b1582c0b5902.yaml
deleted file mode 100644
index d20ff1d99..000000000
--- a/releasenotes/notes/action-update-api-fc51b1582c0b5902.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - |
-    [`blueprint action-update `_]
-    A new action update API is added to allow the action status to be updated.
-    The only valid status value for update is CANCELLED.
-
diff --git a/releasenotes/notes/add-action-filter-40e775a26082f780.yaml b/releasenotes/notes/add-action-filter-40e775a26082f780.yaml
deleted file mode 100644
index 84f3fd02d..000000000
--- a/releasenotes/notes/add-action-filter-40e775a26082f780.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    Added cluster_id as a parameter to the action query APIs.
-    This allows the results returned from the API to be filtered by cluster
-    instead of receiving a long list of unrelated actions.
diff --git a/releasenotes/notes/add-availability_zone-option-to-loadbalancer-74b512fb0c138bfe.yaml b/releasenotes/notes/add-availability_zone-option-to-loadbalancer-74b512fb0c138bfe.yaml
deleted file mode 100644
index 5c2d373fa..000000000
--- a/releasenotes/notes/add-availability_zone-option-to-loadbalancer-74b512fb0c138bfe.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Added the availability_zone option for loadbalancers. This is supported by
-    Octavia starting in the Ussuri release.
\ No newline at end of file
diff --git a/releasenotes/notes/affinity-policy-fix-72ae92dc8ffcff00.yaml b/releasenotes/notes/affinity-policy-fix-72ae92dc8ffcff00.yaml
deleted file mode 100644
index c4fe99cb9..000000000
--- a/releasenotes/notes/affinity-policy-fix-72ae92dc8ffcff00.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed a bug in the affinity policy where calls to the nova driver were wrong.
diff --git a/releasenotes/notes/api-ref-fixes-19bc963430c32ecf.yaml b/releasenotes/notes/api-ref-fixes-19bc963430c32ecf.yaml
deleted file mode 100644
index 5403a43d4..000000000
--- a/releasenotes/notes/api-ref-fixes-19bc963430c32ecf.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - The new API documentation includes fixes to headers like 'location' and
-    'OpenStack-Request-Id', and to responses during version negotiation.
diff --git a/releasenotes/notes/az-info-9344b8d54c0b2665.yaml b/releasenotes/notes/az-info-9344b8d54c0b2665.yaml
deleted file mode 100644
index 1d896c0fb..000000000
--- a/releasenotes/notes/az-info-9344b8d54c0b2665.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - The bug where the availability zone info from a nova server deployment
    was not available has been fixed.
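To illustrate the ``cluster_id`` filter added to the action query API above, a
request might look like the following sketch (the endpoint, token and cluster
ID are placeholders)::

    curl -s -H "X-Auth-Token: $TOKEN" \
        "$SENLIN_ENDPOINT/v1/actions?cluster_id=$CLUSTER_ID"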
diff --git a/releasenotes/notes/batch-scheduling-ca5d98d41fc72973.yaml b/releasenotes/notes/batch-scheduling-ca5d98d41fc72973.yaml
deleted file mode 100644
index 5fcf48999..000000000
--- a/releasenotes/notes/batch-scheduling-ca5d98d41fc72973.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - Improved the action scheduler so that it can decide how many node
-    actions will be fired in each batch. Batch control is a throttling
-    measure to avoid raising too many requests in a short interval to
-    the backend services.
diff --git a/releasenotes/notes/bdmv2-fix-b9ff742cdc282087.yaml b/releasenotes/notes/bdmv2-fix-b9ff742cdc282087.yaml
deleted file mode 100644
index 76a4122da..000000000
--- a/releasenotes/notes/bdmv2-fix-b9ff742cdc282087.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - The UUID used by block_device_mapping_v2 in the nova.server profile is
-    now validated.
diff --git a/releasenotes/notes/bug-1789488-75ee756a53722cd1.yaml b/releasenotes/notes/bug-1789488-75ee756a53722cd1.yaml
deleted file mode 100644
index 0b236c76d..000000000
--- a/releasenotes/notes/bug-1789488-75ee756a53722cd1.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    [`bug 1789488 `_]
-    Perform deep validation of profile and policy schemas so that errors in
-    spec properties are detected.
diff --git a/releasenotes/notes/bug-1811161-c6416ad27ab0a2ce.yaml b/releasenotes/notes/bug-1811161-c6416ad27ab0a2ce.yaml
deleted file mode 100644
index b93ac9b5e..000000000
--- a/releasenotes/notes/bug-1811161-c6416ad27ab0a2ce.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    [`bug 1811161 `_]
-    Perform policy post-op even if the action failed. This allows the health
-    policy to reenable health checks even if an action failed.
diff --git a/releasenotes/notes/bug-1811294-262d4b9cced3f505.yaml b/releasenotes/notes/bug-1811294-262d4b9cced3f505.yaml
deleted file mode 100644
index 855aef837..000000000
--- a/releasenotes/notes/bug-1811294-262d4b9cced3f505.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    [`bug 1811294 `_]
-    Set owner field for actions created to wait for lifecycle completion. This
-    allows these actions to be cleaned up when the engine is restarted.
diff --git a/releasenotes/notes/bug-1813089-db57e7bdfd3983ac.yaml b/releasenotes/notes/bug-1813089-db57e7bdfd3983ac.yaml
deleted file mode 100644
index dded63707..000000000
--- a/releasenotes/notes/bug-1813089-db57e7bdfd3983ac.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-fixes:
-  - |
-    [`bug 1813089 `_]
-    This change picks the address when adding a node to a load balancer based on
-    the subnet IP version. This fix adds support for nodes with
-    dual-stack networking.
diff --git a/releasenotes/notes/bug-1815540-2664a975db5fafc8.yaml b/releasenotes/notes/bug-1815540-2664a975db5fafc8.yaml
deleted file mode 100644
index 5b6d316af..000000000
--- a/releasenotes/notes/bug-1815540-2664a975db5fafc8.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    [`bug 1815540 `_]
-    Cluster recovery and node recovery API request bodies are changed to only accept a
-    single operation. Optional parameters for this operation are set in operation_params.
diff --git a/releasenotes/notes/bug-1817379-23dd2c925259d5f2.yaml b/releasenotes/notes/bug-1817379-23dd2c925259d5f2.yaml
deleted file mode 100644
index 21b4d2c1f..000000000
--- a/releasenotes/notes/bug-1817379-23dd2c925259d5f2.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    [`bug 1817379 `_]
-    Delete ports before recovering a node.
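For the single-operation recovery request bodies described in the bug 1815540
note above, a call might look like this sketch (the endpoint, the ``reboot``
operation and its parameter are illustrative assumptions; only
``operation_params`` comes from the note itself)::

    curl -s -X POST "$SENLIN_ENDPOINT/v1/clusters/$CLUSTER_ID/actions" \
        -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
        -d '{"recover": {"operation": "reboot", "operation_params": {"type": "soft"}}}'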
diff --git a/releasenotes/notes/bug-1817604-41d4b8f6c6f920e4.yaml b/releasenotes/notes/bug-1817604-41d4b8f6c6f920e4.yaml
deleted file mode 100644
index 8a5089cc8..000000000
--- a/releasenotes/notes/bug-1817604-41d4b8f6c6f920e4.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-fixes:
-  - |
-    [`bug 1817604 `_]
-    Fixes major performance bugs within senlin by improving database
-    interaction. This was completed by updating the database models to
-    properly take advantage of relationships. Additionally, it removes
-    unnecessary database calls and prefers joins to retrieve
-    object data.
diff --git a/releasenotes/notes/bug-1828856-bf7a30a6eb00238a.yaml b/releasenotes/notes/bug-1828856-bf7a30a6eb00238a.yaml
deleted file mode 100644
index d16c12710..000000000
--- a/releasenotes/notes/bug-1828856-bf7a30a6eb00238a.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-fixes:
-  - |
-    Fixes a bug where the webhook rejected additional parameters in the body for
-    microversions less than 1.10. Now with the new webhook version 2, additional
-    parameters in the body will always be accepted regardless of the
-    API microversion passed in.
-other:
-  - |
-    Introduces webhook version 2 that is returned when creating new webhook
-    receivers. Webhook version 1 receivers are still valid and will
-    continue to be accepted.
diff --git a/releasenotes/notes/bug-2048099-74f0ca874cfbe6b4.yaml b/releasenotes/notes/bug-2048099-74f0ca874cfbe6b4.yaml
deleted file mode 100644
index 75fea5cc7..000000000
--- a/releasenotes/notes/bug-2048099-74f0ca874cfbe6b4.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    Addresses an issue related to the SCALE_IN action. The bug caused
-    the removal of only one node from the load balancer even when the
-    count of inputs was greater than 1.
diff --git a/releasenotes/notes/bug-2048100-6b4156df956a6f14.yaml b/releasenotes/notes/bug-2048100-6b4156df956a6f14.yaml
deleted file mode 100644
index ee6043c90..000000000
--- a/releasenotes/notes/bug-2048100-6b4156df956a6f14.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    Fixed incorrect handling of actions that reduce the nodes in a cluster
-    and load balancer with desired_capacity = min_size. The node now remains in
-    the cluster, and its IP is no longer removed from the load balancer.
diff --git a/releasenotes/notes/bug-2048452-8a690353815601a0.yaml b/releasenotes/notes/bug-2048452-8a690353815601a0.yaml
deleted file mode 100644
index f663cdb9a..000000000
--- a/releasenotes/notes/bug-2048452-8a690353815601a0.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-fixes:
-  - |
-    [`bug 2048452 `_]
-    Fixed a bug where `TrustMiddleware` was unable to fetch trusts/credentials
-    from the Identity service, possibly related to:
-    https://bugs.launchpad.net/keystone/+bug/1959674
-    The bug is fixed by using the `admin_token` auth method instead of `token`
-    to fetch trusts/credentials from the Identity service.
diff --git a/releasenotes/notes/bug-2048726-a830a7838661a41f.yaml b/releasenotes/notes/bug-2048726-a830a7838661a41f.yaml
deleted file mode 100644
index e5165ba2b..000000000
--- a/releasenotes/notes/bug-2048726-a830a7838661a41f.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    Fixed a bug where an exception raised in the `_resolve_bdm` method of the
-    `senlin.profile.os.nova.server` profile prevented the cluster from creating
-    new nodes.
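Given the webhook version 2 behavior noted above, extra parameters can simply
be carried in the request body; a sketch, with the receiver URL and the
``count`` input as placeholders::

    curl -s -X POST "$WEBHOOK_URL" \
        -H "Content-Type: application/json" \
        -d '{"count": 2}'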
diff --git a/releasenotes/notes/bug-2049191-8ee2d8352b05cfef.yaml b/releasenotes/notes/bug-2049191-8ee2d8352b05cfef.yaml
deleted file mode 100644
index 5895abf8c..000000000
--- a/releasenotes/notes/bug-2049191-8ee2d8352b05cfef.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-fixes:
-  - |
-    Excluded CLUSTER_RESIZE from the list of actions that skip pre-op checks
-    when the cluster is already at the minimum threshold. When the cluster is at
-    the minimum threshold, the pre-operation LB check will run for actions such
-    as CLUSTER_DEL_NODES, CLUSTER_SCALE_IN and NODE_DELETE, and will skip
-    actions like CLUSTER_REPLACE_NODES and CLUSTER_RESIZE.
diff --git a/releasenotes/notes/capacity-calculation-4fd389ff12107dfb.yaml b/releasenotes/notes/capacity-calculation-4fd389ff12107dfb.yaml
deleted file mode 100644
index 0785600ec..000000000
--- a/releasenotes/notes/capacity-calculation-4fd389ff12107dfb.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - Fixed a bug related to the desired_capacity calculation. The base number used
-    now is the current capacity of the cluster instead of the previous 'desired'
-    capacity. This includes all actions that change cluster capacity and all
-    related policies.
diff --git a/releasenotes/notes/clean-actions-for-cluster-node-438ca5268e7fd258.yaml b/releasenotes/notes/clean-actions-for-cluster-node-438ca5268e7fd258.yaml
deleted file mode 100644
index 4f2495359..000000000
--- a/releasenotes/notes/clean-actions-for-cluster-node-438ca5268e7fd258.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    When a cluster or a node is deleted, the action records associated with
-    them are now automatically deleted from the database.
diff --git a/releasenotes/notes/cluster-action-refresh-9eeb60f1f2c1d0abr.yaml b/releasenotes/notes/cluster-action-refresh-9eeb60f1f2c1d0abr.yaml
deleted file mode 100644
index 6391e7c1a..000000000
--- a/releasenotes/notes/cluster-action-refresh-9eeb60f1f2c1d0abr.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Added a cluster entity refresh to the cluster action execute wrapper, which
-    makes sure the state of the action does not become stale while queued.
diff --git a/releasenotes/notes/cluster-check-interval-b01e8140cc83760e.yaml b/releasenotes/notes/cluster-check-interval-b01e8140cc83760e.yaml
deleted file mode 100644
index 1f471048c..000000000
--- a/releasenotes/notes/cluster-check-interval-b01e8140cc83760e.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    A new configuration option check_interval_max is added (default=3600) for
-    cluster health check intervals.
diff --git a/releasenotes/notes/cluster-collect-90e460c7bfede347.yaml b/releasenotes/notes/cluster-collect-90e460c7bfede347.yaml
deleted file mode 100644
index bbfa23cb7..000000000
--- a/releasenotes/notes/cluster-collect-90e460c7bfede347.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-features:
-  - A new ``cluster_collect`` API is added.
diff --git a/releasenotes/notes/cluster-delete-conflict-94261706eb29e9bb.yaml b/releasenotes/notes/cluster-delete-conflict-94261706eb29e9bb.yaml
deleted file mode 100644
index aa977743b..000000000
--- a/releasenotes/notes/cluster-delete-conflict-94261706eb29e9bb.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-upgrade:
-  - Cluster delete API calls may return a 409 status code if there
-    are policies and/or receivers associated with the cluster. Previously,
-    a 400 status code was returned.
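The ``cluster_collect`` API noted above is exposed through the client plugin; a
sketch, assuming a cluster named ``web_cluster`` and an attribute path that
follows the profile schema::

    openstack cluster collect --path details.addresses web_cluster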
diff --git a/releasenotes/notes/cluster-delete-with-policy-d2dca161e42ee6ba.yaml b/releasenotes/notes/cluster-delete-with-policy-d2dca161e42ee6ba.yaml
deleted file mode 100644
index 918710022..000000000
--- a/releasenotes/notes/cluster-delete-with-policy-d2dca161e42ee6ba.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-prelude: >
-  Updated tests to work with updated cluster delete.
-features:
-  - |
-    Allows the cluster delete actions to detach policies and delete receivers
-    for the cluster being deleted. This simplifies deleting clusters by not
-    having to detach or delete all dependencies from it beforehand.
diff --git a/releasenotes/notes/cluster-desired-capacity-d876347f69b04b4f.yaml b/releasenotes/notes/cluster-desired-capacity-d876347f69b04b4f.yaml
deleted file mode 100644
index 8bda99aa6..000000000
--- a/releasenotes/notes/cluster-desired-capacity-d876347f69b04b4f.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - The 'desired_capacity' reflects the expectation from a requester's
-    viewpoint. The engine now changes the 'desired_capacity' after the request
-    is validated/sanitized, before the action is actually implemented. This
-    means the 'desired_capacity' will change even if an action fails.
diff --git a/releasenotes/notes/cluster-lock-e283fb9bf1002bca.yaml b/releasenotes/notes/cluster-lock-e283fb9bf1002bca.yaml
deleted file mode 100644
index d2276eeef..000000000
--- a/releasenotes/notes/cluster-lock-e283fb9bf1002bca.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed the cluster lock primary key conflict problem.
diff --git a/releasenotes/notes/cluster-node-dependents-3bdbebd773d276d1.yaml b/releasenotes/notes/cluster-node-dependents-3bdbebd773d276d1.yaml
deleted file mode 100644
index a82188a3f..000000000
--- a/releasenotes/notes/cluster-node-dependents-3bdbebd773d276d1.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Added dependents to clusters and nodes for recording other clusters/nodes
-    that depend on them.
diff --git a/releasenotes/notes/cluster-node-status-e7fced162b415452.yaml b/releasenotes/notes/cluster-node-status-e7fced162b415452.yaml
deleted file mode 100644
index c0505892b..000000000
--- a/releasenotes/notes/cluster-node-status-e7fced162b415452.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed cluster/node status setting after a cluster/node check operation.
diff --git a/releasenotes/notes/cluster-ops-433a5aa608a0eb7f.yaml b/releasenotes/notes/cluster-ops-433a5aa608a0eb7f.yaml
deleted file mode 100644
index 7c25e9509..000000000
--- a/releasenotes/notes/cluster-ops-433a5aa608a0eb7f.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - A new API "cluster-op" is introduced to trigger a profile-type-specific
-    operation on all nodes in a cluster. This API is available since API
-    micro-version 1.4.
diff --git a/releasenotes/notes/cluster-recover-d87d429873b376db.yaml b/releasenotes/notes/cluster-recover-d87d429873b376db.yaml
deleted file mode 100644
index 4c8ed58c5..000000000
--- a/releasenotes/notes/cluster-recover-d87d429873b376db.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Fixed the cluster-recover operation in the engine so that it accepts
-    parameters from API requests in addition to policy decisions (if any).
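The "cluster-op" API above triggers a profile-type-specific operation; a sketch
using the docker profile's ``restart`` operation mentioned elsewhere in these
notes (the exact request shape is an assumption modeled on other cluster action
calls)::

    curl -s -X POST "$SENLIN_ENDPOINT/v1/clusters/$CLUSTER_ID/ops" \
        -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
        -H "OpenStack-API-Version: clustering 1.4" \
        -d '{"restart": {}}'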
diff --git a/releasenotes/notes/cluster-resize-fix-bee18840a98907d8.yaml b/releasenotes/notes/cluster-resize-fix-bee18840a98907d8.yaml
deleted file mode 100644
index 36666766d..000000000
--- a/releasenotes/notes/cluster-resize-fix-bee18840a98907d8.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Fixed a bug related to an oslo.versionedobjects change that prevented
-    cluster actions from being properly encoded in JSON requests.
diff --git a/releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml b/releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml
deleted file mode 100644
index dbed938d5..000000000
--- a/releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-prelude: >
-  This release alters the cluster_scale_in and cluster_scale_out actions to
-  no longer place the action into the actions table when a conflict is
-  detected. This behavior is an improvement on the old way actions were
-  processed, as the requester now receives immediate feedback from the
-  API when an action cannot be processed. This release also honors the
-  scaling action cooldown in the same manner, returning an error via the API
-  when a scaling action cannot be processed due to cooldown.
-features:
-  - |
-    [`blueprint scaling-action-acceptance `_]
-    Scaling actions (IN or OUT) now validate that there is no conflicting
-    action already being processed and will return an error via the API
-    informing the end user if a conflict is detected. A conflicting action is
-    detected when a new action of either `CLUSTER_SCALE_IN` or
-    `CLUSTER_SCALE_OUT` is attempted while there is already a cluster scaling
-    action in the action table in a pending status (READY, RUNNING, WAITING,
-    ACTION_WAITING_LIFECYCLE_COMPLETION).
-    Additionally, the cooldown will be checked and enforced when a scaling
-    action is requested. If the cooldown is being observed, the requester will
-    be informed via an error when submitting the action.
diff --git a/releasenotes/notes/cluster-status-update-dd9133092aef05ab.yaml b/releasenotes/notes/cluster-status-update-dd9133092aef05ab.yaml
deleted file mode 100644
index 4344eee01..000000000
--- a/releasenotes/notes/cluster-status-update-dd9133092aef05ab.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - Fixed cluster status update logic so that cluster status is solely
-    determined by the status of its member nodes. The status is updated
-    each time a cluster operation has completed.
diff --git a/releasenotes/notes/compute-instance-fencing-63b931cdf35b127c.yaml b/releasenotes/notes/compute-instance-fencing-63b931cdf35b127c.yaml
deleted file mode 100644
index 26926c104..000000000
--- a/releasenotes/notes/compute-instance-fencing-63b931cdf35b127c.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - The senlin-engine now supports fencing a corrupted VM instance by deleting
-    it forcibly.
diff --git a/releasenotes/notes/config-default-nova-timeout-f0bd73811ac3a8bb.yaml b/releasenotes/notes/config-default-nova-timeout-f0bd73811ac3a8bb.yaml
deleted file mode 100644
index 66846c32a..000000000
--- a/releasenotes/notes/config-default-nova-timeout-f0bd73811ac3a8bb.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - |
-    Added a new config option to specify the timeout for Nova API calls.
\ No newline at end of file
diff --git a/releasenotes/notes/config-doc-cb8b37e360422301.yaml b/releasenotes/notes/config-doc-cb8b37e360422301.yaml
deleted file mode 100644
index 562a703cc..000000000
--- a/releasenotes/notes/config-doc-cb8b37e360422301.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-other:
-  - Senlin API/Engine configuration options are now documented and published
-    online.
diff --git a/releasenotes/notes/config-scheduler-thread-pool-size-de608624a6cb4b43r.yaml b/releasenotes/notes/config-scheduler-thread-pool-size-de608624a6cb4b43r.yaml
deleted file mode 100644
index 25cec4ec9..000000000
--- a/releasenotes/notes/config-scheduler-thread-pool-size-de608624a6cb4b43r.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - |
-    Added a config option for the scheduler thread pool size.
diff --git a/releasenotes/notes/config-stop-node-before-delete-4ab08e61b40e4474.yaml b/releasenotes/notes/config-stop-node-before-delete-4ab08e61b40e4474.yaml
deleted file mode 100644
index 0fd39bfad..000000000
--- a/releasenotes/notes/config-stop-node-before-delete-4ab08e61b40e4474.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - |
-    Added a new boolean config option to stop nodes before deletion for all clusters.
diff --git a/releasenotes/notes/config-trust-roles-416e26e03036ae40.yaml b/releasenotes/notes/config-trust-roles-416e26e03036ae40.yaml
deleted file mode 100644
index ee8e941b0..000000000
--- a/releasenotes/notes/config-trust-roles-416e26e03036ae40.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Added a new list config option to allow trust roles to be overridden.
-
diff --git a/releasenotes/notes/container-ops-e57d096742202206.yaml b/releasenotes/notes/container-ops-e57d096742202206.yaml
deleted file mode 100644
index e05d3f791..000000000
--- a/releasenotes/notes/container-ops-e57d096742202206.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - The Docker container profile now supports operations like restart, pause
-    and unpause.
diff --git a/releasenotes/notes/container-profile-152bf2908c70ffad.yaml b/releasenotes/notes/container-profile-152bf2908c70ffad.yaml
deleted file mode 100644
index e9469ba95..000000000
--- a/releasenotes/notes/container-profile-152bf2908c70ffad.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - A new profile type 'container.dockerinc.docker-1.0' is added to support
-    creation and management of docker clusters. This is still an experimental
-    feature. Please use with caution.
diff --git a/releasenotes/notes/db-action-retries-d471fe85b4510afd.yaml b/releasenotes/notes/db-action-retries-d471fe85b4510afd.yaml
deleted file mode 100644
index 9a64b6914..000000000
--- a/releasenotes/notes/db-action-retries-d471fe85b4510afd.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-other:
-  - |
-    DB layer operations are now retried when there are transient errors.
diff --git a/releasenotes/notes/db-ignore-project_safe-for-admins-2986f15e74cd1d1c.yaml b/releasenotes/notes/db-ignore-project_safe-for-admins-2986f15e74cd1d1c.yaml
deleted file mode 100644
index 5442a192d..000000000
--- a/releasenotes/notes/db-ignore-project_safe-for-admins-2986f15e74cd1d1c.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-features:
-  - |
-    Admin role users can now access and modify all resources (clusters, nodes,
-    etc) regardless of which project they belong to.
-security:
-  - |
-    Removed the restriction for admin role users that prevented access/changes
-    to resources (clusters, nodes, etc) belonging to projects not matching the
-    project used for authentication. Access for non-admin users is still
-    isolated to the project used for authentication.
diff --git a/releasenotes/notes/db-locking-logic-9c97b04ce8c52989.yaml b/releasenotes/notes/db-locking-logic-9c97b04ce8c52989.yaml
deleted file mode 100644
index 729204ebe..000000000
--- a/releasenotes/notes/db-locking-logic-9c97b04ce8c52989.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - |
-    Fixed DB locking logic to avoid deadlock.
\ No newline at end of file
diff --git a/releasenotes/notes/db-retries-da4a0d9d83ad56bb.yaml b/releasenotes/notes/db-retries-da4a0d9d83ad56bb.yaml
deleted file mode 100644
index 0090c2a57..000000000
--- a/releasenotes/notes/db-retries-da4a0d9d83ad56bb.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    All REST calls that involve a DB interaction are now automatically retried
-    upon deadlock exceptions.
diff --git a/releasenotes/notes/delete-batch-a16ee5ed2512eab7.yaml b/releasenotes/notes/delete-batch-a16ee5ed2512eab7.yaml
deleted file mode 100644
index eba0eb465..000000000
--- a/releasenotes/notes/delete-batch-a16ee5ed2512eab7.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-deprecations:
-  - |
-    Support for the CLUSTER_DELETE action in the experimental batch policy
-    is dropped due to issues with cluster locking. This could be resurrected
-    in the future when a proper workaround is identified.
diff --git a/releasenotes/notes/delete_with_dependants-823c6c4921f22575.yaml b/releasenotes/notes/delete_with_dependants-823c6c4921f22575.yaml
deleted file mode 100644
index 6f8f2729d..000000000
--- a/releasenotes/notes/delete_with_dependants-823c6c4921f22575.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Allow the cluster delete action to detach policies and delete receivers
-    instead of erroring.
diff --git a/releasenotes/notes/deletion-policy-11bcb7c0e90bbfcc.yaml b/releasenotes/notes/deletion-policy-11bcb7c0e90bbfcc.yaml
deleted file mode 100644
index b2a2addc7..000000000
--- a/releasenotes/notes/deletion-policy-11bcb7c0e90bbfcc.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Fixed an error in the built-in deletion policy which failed to process
-    the NODE_DELETE action.
diff --git a/releasenotes/notes/deletion-policy-node-delete-dc70da377b2a4f77.yaml b/releasenotes/notes/deletion-policy-node-delete-dc70da377b2a4f77.yaml
deleted file mode 100644
index 574fc7b38..000000000
--- a/releasenotes/notes/deletion-policy-node-delete-dc70da377b2a4f77.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - The deletion policy is enhanced to handle 'NODE_DELETE' actions which
-    derive from a standalone 'node_delete' request.
diff --git a/releasenotes/notes/deprecate-json-formatted-policy-file-0c29555b3ea0c984.yaml b/releasenotes/notes/deprecate-json-formatted-policy-file-0c29555b3ea0c984.yaml
deleted file mode 100644
index c9c530004..000000000
--- a/releasenotes/notes/deprecate-json-formatted-policy-file-0c29555b3ea0c984.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-upgrade:
-  - |
-    The default value of the ``[oslo_policy] policy_file`` config option has
-    been changed from ``policy.json`` to ``policy.yaml``.
-    Operators who are utilizing customized or previously generated
-    static policy JSON files (which are not needed by default) should
-    generate new policy files or convert them to YAML format. Use the
-    `oslopolicy-convert-json-to-yaml
-    `_
-    tool to convert a JSON to YAML formatted policy file in
-    a backward compatible way.
-deprecations:
-  - |
-    Use of JSON policy files was deprecated by the ``oslo.policy`` library
-    during the Victoria development cycle. As a result, this deprecation is
-    being noted in the Wallaby cycle with an anticipated future removal of
-    support by ``oslo.policy``. As such, operators will need to convert to
-    YAML policy files. Please see the upgrade notes for details on migration
-    of any custom policy files.
diff --git a/releasenotes/notes/destroy-nodes-after-remove-37bffdc35a9b7a96.yaml b/releasenotes/notes/destroy-nodes-after-remove-37bffdc35a9b7a96.yaml
deleted file mode 100644
index e86fdbb98..000000000
--- a/releasenotes/notes/destroy-nodes-after-remove-37bffdc35a9b7a96.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - A new, optional parameter "destroy_after_deletion" is added to the
-    cluster-del-nodes request since API micro-version 1.4.
diff --git a/releasenotes/notes/doc-fixes-0783e8120b61299br.yaml b/releasenotes/notes/doc-fixes-0783e8120b61299br.yaml
deleted file mode 100644
index ef88c65fc..000000000
--- a/releasenotes/notes/doc-fixes-0783e8120b61299br.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed the example of the "aodh alarm create" command.
diff --git a/releasenotes/notes/doc-fixes-5057bf93464810cc.yaml b/releasenotes/notes/doc-fixes-5057bf93464810cc.yaml
deleted file mode 100644
index 4ba8a350a..000000000
--- a/releasenotes/notes/doc-fixes-5057bf93464810cc.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-fixes:
-  - |
-    Various fixes to the user doc, developer doc and API documentation.
-    Fixed api-ref and docs building.
-    Fixed keystone_authtoken config in docs.
-    Updated docs and examples for health policy v1.1.
-    Updated api-ref location.
-    Updated the Cirros example file.
-
diff --git a/releasenotes/notes/doc-fixes-685c64d1ef509041.yaml b/releasenotes/notes/doc-fixes-685c64d1ef509041.yaml
deleted file mode 100644
index 09967462c..000000000
--- a/releasenotes/notes/doc-fixes-685c64d1ef509041.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Senlin API/functional/integration tests were previously moved to the
    senlin-tempest-plugin project; the documentation is updated for this change.
diff --git a/releasenotes/notes/doc-fixes-cd8c7006f8c66387.yaml b/releasenotes/notes/doc-fixes-cd8c7006f8c66387.yaml
deleted file mode 100644
index 2ae931e40..000000000
--- a/releasenotes/notes/doc-fixes-cd8c7006f8c66387.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Various fixes to the user doc, developer doc and API documentation.
diff --git a/releasenotes/notes/doc-fixes-e60bb1a486f67e0c.yaml b/releasenotes/notes/doc-fixes-e60bb1a486f67e0c.yaml
deleted file mode 100644
index 5124344d7..000000000
--- a/releasenotes/notes/doc-fixes-e60bb1a486f67e0c.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - |
-    Various bug fixes to the user manual and sample profiles/policies.
diff --git a/releasenotes/notes/docker-reboot-999ec624186864e3.yaml b/releasenotes/notes/docker-reboot-999ec624186864e3.yaml
deleted file mode 100644
index 4f678f5ca..000000000
--- a/releasenotes/notes/docker-reboot-999ec624186864e3.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - |
-    Fixed an error when restarting a docker container node.
diff --git a/releasenotes/notes/docker-start-c850c256c6149f4f.yaml b/releasenotes/notes/docker-start-c850c256c6149f4f.yaml
deleted file mode 100644
index 2453dc4e4..000000000
--- a/releasenotes/notes/docker-start-c850c256c6149f4f.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - |
-    Added operation support to start a docker container.
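The JSON-to-YAML conversion called out above can be scripted with the tool
named in the note; a sketch, assuming default senlin file locations::

    oslopolicy-convert-json-to-yaml --namespace senlin \
        --policy-file /etc/senlin/policy.json \
        --output-file /etc/senlin/policy.yaml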
diff --git a/releasenotes/notes/docker-update-1b465241ca78873c.yaml b/releasenotes/notes/docker-update-1b465241ca78873c.yaml
deleted file mode 100644
index 16947e922..000000000
--- a/releasenotes/notes/docker-update-1b465241ca78873c.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - |
-    Supported the name update operation for the docker profile.
diff --git a/releasenotes/notes/drop-py-2-7-154eeefdc9886091.yaml b/releasenotes/notes/drop-py-2-7-154eeefdc9886091.yaml
deleted file mode 100644
index e63550121..000000000
--- a/releasenotes/notes/drop-py-2-7-154eeefdc9886091.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-upgrade:
-  - |
-    Python 2.7 support has been dropped. The last release of Senlin
-    to support Python 2.7 is OpenStack Train. The minimum version of Python now
-    supported by Senlin is Python 3.6.
diff --git a/releasenotes/notes/drop-py34-support-21e20efb9bf0b326.yaml b/releasenotes/notes/drop-py34-support-21e20efb9bf0b326.yaml
deleted file mode 100644
index e5c87438a..000000000
--- a/releasenotes/notes/drop-py34-support-21e20efb9bf0b326.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-deprecations:
-  - |
-    Support for py3.4 is dropped. Please use py3.5 instead.
diff --git a/releasenotes/notes/drop-python-3-6-and-3-7-3a90d172a5e43660.yaml b/releasenotes/notes/drop-python-3-6-and-3-7-3a90d172a5e43660.yaml
deleted file mode 100644
index 3d5e4e3d3..000000000
--- a/releasenotes/notes/drop-python-3-6-and-3-7-3a90d172a5e43660.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-upgrade:
-  - |
-    Python 3.6 & 3.7 support has been dropped. The minimum version of Python now
-    supported is Python 3.8.
\ No newline at end of file
diff --git a/releasenotes/notes/dynamic-timer-67f053499f4b32e2.yaml b/releasenotes/notes/dynamic-timer-67f053499f4b32e2.yaml
deleted file mode 100644
index be25162aa..000000000
--- a/releasenotes/notes/dynamic-timer-67f053499f4b32e2.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - The health manager is improved to use dynamic timers instead of
-    fixed-interval timers when polling a cluster's status.
diff --git a/releasenotes/notes/enforce-multi-tenancy-ee27b9bfec7ba405.yaml b/releasenotes/notes/enforce-multi-tenancy-ee27b9bfec7ba405.yaml
deleted file mode 100644
index 00320ada1..000000000
--- a/releasenotes/notes/enforce-multi-tenancy-ee27b9bfec7ba405.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-security:
-  - Multi-tenancy is enhanced so that an admin role user has to respect
-    project isolation unless explicitly asking for an exception.
diff --git a/releasenotes/notes/error-messages-bd8b5a6d12e2c4af.yaml b/releasenotes/notes/error-messages-bd8b5a6d12e2c4af.yaml
deleted file mode 100644
index 51951fc6a..000000000
--- a/releasenotes/notes/error-messages-bd8b5a6d12e2c4af.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Error messages returned from API requests are now unified. All parameter
-    validation failures with the same root cause return a similar message.
diff --git a/releasenotes/notes/event-for-derived-actions-8bd44367fa683dbc.yaml b/releasenotes/notes/event-for-derived-actions-8bd44367fa683dbc.yaml
deleted file mode 100644
index 9c16210f5..000000000
--- a/releasenotes/notes/event-for-derived-actions-8bd44367fa683dbc.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - A configuration option "exclude_derived_actions" is introduced into the
-    "dispatchers" group for controlling whether derived actions should lead
-    to event notifications and/or DB records.
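The ``exclude_derived_actions`` option above lives in the ``dispatchers``
group; in ``senlin.conf`` it would look like the following sketch (the value
shown is illustrative)::

    [dispatchers]
    # Do not emit event notifications/DB records for derived actions
    exclude_derived_actions = True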
diff --git a/releasenotes/notes/event-list-b268bb778efa9ee1.yaml b/releasenotes/notes/event-list-b268bb778efa9ee1.yaml
deleted file mode 100644
index bd472df45..000000000
--- a/releasenotes/notes/event-list-b268bb778efa9ee1.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    New logic is added to the event-list operation so that users can specify
-    the name or short-id of a cluster for filtering.
diff --git a/releasenotes/notes/event-notification-eda06b43ce17a081.yaml b/releasenotes/notes/event-notification-eda06b43ce17a081.yaml
deleted file mode 100644
index c76fb511d..000000000
--- a/releasenotes/notes/event-notification-eda06b43ce17a081.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    The engine has been augmented to send event notifications only when a node
-    is active and has an associated physical ID. This targets the
-    lifecycle hooks and possibly other notifications.
diff --git a/releasenotes/notes/event-purge-db868a063e18eafb.yaml b/releasenotes/notes/event-purge-db868a063e18eafb.yaml
deleted file mode 100644
index fd2de9ff8..000000000
--- a/releasenotes/notes/event-purge-db868a063e18eafb.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - An event_purge subcommand is added to the senlin-manage tool for purging
-    events generated in a specific project.
diff --git a/releasenotes/notes/event-table-change-dcb42c8b6d145fec.yaml b/releasenotes/notes/event-table-change-dcb42c8b6d145fec.yaml
deleted file mode 100644
index 1b1f0f5d0..000000000
--- a/releasenotes/notes/event-table-change-dcb42c8b6d145fec.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-upgrade:
-  - DB columns obj_id, obj_type and obj_name in the event table are now
-    renamed to oid, otype and oname respectively.
diff --git a/releasenotes/notes/fail-fast-on-locked-resource-eee28572dc40009a.yaml b/releasenotes/notes/fail-fast-on-locked-resource-eee28572dc40009a.yaml
deleted file mode 100644
index a8d100553..000000000
--- a/releasenotes/notes/fail-fast-on-locked-resource-eee28572dc40009a.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-prelude: >
-  This release alters the behavior of cluster and node APIs which create,
-  update or delete either resource. In the previous release those API calls
-  would be accepted even if the target resource was already locked by another
-  action. The old implementation would wait until the other action released
-  the lock and then continue to execute the desired action. With the new
-  implementation, any cluster or node API call that modifies the resource
-  will be rejected with a 409 conflict.
-features:
-  - |
-    [`blueprint fail-fast-locked-resource `_]
-    POST, PATCH or DELETE API calls for clusters or nodes that require a lock
-    are rejected with a 409 resource conflict if another action is already
-    holding a lock on the target resource.
diff --git a/releasenotes/notes/fix-action-triggering-e880b02234028315.yaml b/releasenotes/notes/fix-action-triggering-e880b02234028315.yaml
deleted file mode 100644
index 2157fa84c..000000000
--- a/releasenotes/notes/fix-action-triggering-e880b02234028315.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    When an action is marked as RETRY, its status is reset to READY for a
-    reschedule. A bug related to this behavior is now fixed.
diff --git a/releasenotes/notes/fix-aodh-integration-41e69276158ad233.yaml b/releasenotes/notes/fix-aodh-integration-41e69276158ad233.yaml
deleted file mode 100644
index a45d4e611..000000000
--- a/releasenotes/notes/fix-aodh-integration-41e69276158ad233.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-upgrade:
-  - |
-    The API microversion 1.10 has fixed the webhook trigger API for easier
-    integration with Aodh. In previous microversions, the query parameters
-    were used as action inputs. Starting from 1.10, the key-value pairs in the
-    request body are also considered as request inputs.
diff --git a/releasenotes/notes/fix-cluster-index-ae0060b6337d6d55.yaml b/releasenotes/notes/fix-cluster-index-ae0060b6337d6d55.yaml
deleted file mode 100644
index b55b4c06e..000000000
--- a/releasenotes/notes/fix-cluster-index-ae0060b6337d6d55.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed the cluster next_index update when adding nodes to a cluster.
diff --git a/releasenotes/notes/fix-cooldown-5082711989ecd536.yaml b/releasenotes/notes/fix-cooldown-5082711989ecd536.yaml
deleted file mode 100644
index e1ac5ab9c..000000000
--- a/releasenotes/notes/fix-cooldown-5082711989ecd536.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - |
-    Fixed a premature return from the policy cooldown check.
diff --git a/releasenotes/notes/fix-db-deadlock-1d2bdb9ce785734a.yaml b/releasenotes/notes/fix-db-deadlock-1d2bdb9ce785734a.yaml
deleted file mode 100644
index 00257a2e8..000000000
--- a/releasenotes/notes/fix-db-deadlock-1d2bdb9ce785734a.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Fixed a DB layer deadlock issue that surfaced recently during concurrent
-    DB operations.
diff --git a/releasenotes/notes/fix-delete-apis-bf9f47b5fcf8f3e6.yaml b/releasenotes/notes/fix-delete-apis-bf9f47b5fcf8f3e6.yaml
deleted file mode 100644
index ab830c56b..000000000
--- a/releasenotes/notes/fix-delete-apis-bf9f47b5fcf8f3e6.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Fixed resource delete operations which should return a 204 status code
    with a body length of zero.
diff --git a/releasenotes/notes/fix-delete-node-error-31575d62bc9375ec.yaml b/releasenotes/notes/fix-delete-node-error-31575d62bc9375ec.yaml
deleted file mode 100644
index a9e664eb5..000000000
--- a/releasenotes/notes/fix-delete-node-error-31575d62bc9375ec.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed a bug that occurred when deleting a node in error state.
diff --git a/releasenotes/notes/fix-desired-when-omitted-e7ffc0aa72ab8cc9.yaml b/releasenotes/notes/fix-desired-when-omitted-e7ffc0aa72ab8cc9.yaml
deleted file mode 100644
index acc5bc382..000000000
--- a/releasenotes/notes/fix-desired-when-omitted-e7ffc0aa72ab8cc9.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    Fixed a bug related to desired_capacity when creating a cluster. The old
-    behavior had it default to 1; however, the correct behavior is to have
-    it default to min_size if provided.
diff --git a/releasenotes/notes/fix-dup-of-action-dump-0b95a07adf3ccdba.yaml b/releasenotes/notes/fix-dup-of-action-dump-0b95a07adf3ccdba.yaml
deleted file mode 100644
index e9b264da0..000000000
--- a/releasenotes/notes/fix-dup-of-action-dump-0b95a07adf3ccdba.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - |
-    Fixed a problem related to duplicated event dumps during action execution.
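Following the desired_capacity fix above, omitting a desired capacity at
creation time makes it default to ``min_size``; a sketch (the profile and
cluster names are placeholders)::

    openstack cluster create --profile web_profile \
        --min-size 2 --max-size 5 web_cluster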
diff --git a/releasenotes/notes/fix-health-check-5d77795885676661.yaml b/releasenotes/notes/fix-health-check-5d77795885676661.yaml
deleted file mode 100644
index 05d78d803..000000000
--- a/releasenotes/notes/fix-health-check-5d77795885676661.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed a bug in health checking that was introduced by oslo.context changes.
diff --git a/releasenotes/notes/fix-health-cluster-check-5ce1c0309c03c5d5.yaml b/releasenotes/notes/fix-health-cluster-check-5ce1c0309c03c5d5.yaml
deleted file mode 100644
index f66d2c5e7..000000000
--- a/releasenotes/notes/fix-health-cluster-check-5ce1c0309c03c5d5.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Fixed health checking so that a cluster can still be health checked while
-    a resize or scale operation is creating nodes that have no physical ID yet.
diff --git a/releasenotes/notes/fix-health-mgr-opts-99898614f37c5d74.yaml b/releasenotes/notes/fix-health-mgr-opts-99898614f37c5d74.yaml
deleted file mode 100644
index 70cc25483..000000000
--- a/releasenotes/notes/fix-health-mgr-opts-99898614f37c5d74.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Fixed a problem where health manager related configuration options were
-    not properly exposed.
diff --git a/releasenotes/notes/fix-health-policy-bind-9b6ed0e51939eac3.yaml b/releasenotes/notes/fix-health-policy-bind-9b6ed0e51939eac3.yaml
deleted file mode 100644
index 16fa17011..000000000
--- a/releasenotes/notes/fix-health-policy-bind-9b6ed0e51939eac3.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed a bug in checking whether a health policy is already attached.
diff --git a/releasenotes/notes/fix-network-error-handling-e78da90b6bc2319c.yaml b/releasenotes/notes/fix-network-error-handling-e78da90b6bc2319c.yaml
deleted file mode 100644
index 34a74d349..000000000
--- a/releasenotes/notes/fix-network-error-handling-e78da90b6bc2319c.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Fixed error handling when the network is not found during nova server creation.
diff --git a/releasenotes/notes/fix-node-get-detail-4e6d30c3a6b2ce60.yaml b/releasenotes/notes/fix-node-get-detail-4e6d30c3a6b2ce60.yaml
deleted file mode 100644
index cd81b4a18..000000000
--- a/releasenotes/notes/fix-node-get-detail-4e6d30c3a6b2ce60.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Fixed retrieval of node details when VM creation has failed.
-
diff --git a/releasenotes/notes/fix-node-leak-9b1c08342a52542d.yaml b/releasenotes/notes/fix-node-leak-9b1c08342a52542d.yaml
deleted file mode 100644
index 222c18dd7..000000000
--- a/releasenotes/notes/fix-node-leak-9b1c08342a52542d.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Fixed a node leak that occurred when node creation failed.
-
diff --git a/releasenotes/notes/fix-node-recover-5af129bf0688577d.yaml b/releasenotes/notes/fix-node-recover-5af129bf0688577d.yaml
deleted file mode 100644
index e4883f2d5..000000000
--- a/releasenotes/notes/fix-node-recover-5af129bf0688577d.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Fixed the node recover operation behavior so that unsupported operations
-    can be detected and handled.
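The fix-node-recover note above is about detecting unsupported recover operations before they run. The following is an illustrative sketch of that idea only, not Senlin's actual code; the set of supported operations is an assumption for the example.

```python
# Illustrative sketch (not Senlin's actual implementation): reject
# unsupported recover operations up front so they can be handled cleanly
# instead of failing partway through a recovery.
SUPPORTED_RECOVER_OPERATIONS = {"REBUILD", "RECREATE"}  # assumed set

def validate_recover_operation(operation: str) -> str:
    """Return the normalized operation name, or raise if unsupported."""
    op = operation.upper()
    if op not in SUPPORTED_RECOVER_OPERATIONS:
        raise ValueError("Unsupported recover operation: %s" % operation)
    return op

# validate_recover_operation("reboot") raises ValueError here, so the request
# can be rejected before any recovery work is attempted.
```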
diff --git a/releasenotes/notes/fix-node-status-for-lb-fc7714da09bec2fb.yaml b/releasenotes/notes/fix-node-status-for-lb-fc7714da09bec2fb.yaml
deleted file mode 100644
index c5574968f..000000000
--- a/releasenotes/notes/fix-node-status-for-lb-fc7714da09bec2fb.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    When a node cannot be added to a load-balancer as desired, or cannot be
-    removed from a load-balancer when requested, the node will be marked as
-    being in WARNING status.
diff --git a/releasenotes/notes/fix-openstacksdk-exception-b762e649bfab4b31r.yaml b/releasenotes/notes/fix-openstacksdk-exception-b762e649bfab4b31r.yaml
deleted file mode 100644
index a43143157..000000000
--- a/releasenotes/notes/fix-openstacksdk-exception-b762e649bfab4b31r.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-fixes:
-  - The openstacksdk 0.14.0 release fixed a bug related to SDK exceptions
-    (https://review.openstack.org/#/c/571101/). With that change, an SDK
-    exception contains the detailed message only if the message string equals
-    'Error'. Fixed test_parse_exception_http_exception_no_details to use
-    'Error' as the exception message so that the test case passes.
diff --git a/releasenotes/notes/fix-policy-type-version-939a1fb4e84908f9.yaml b/releasenotes/notes/fix-policy-type-version-939a1fb4e84908f9.yaml
deleted file mode 100644
index 1dc7acc05..000000000
--- a/releasenotes/notes/fix-policy-type-version-939a1fb4e84908f9.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-issues:
-  - There are cases where the event-listener based health management cannot
-    successfully stop all listeners.
-fixes:
-  - Enable old versions of built-in policy types to be listed and used.
diff --git a/releasenotes/notes/fix-port-id-parameter-de4679438a891a67r.yaml b/releasenotes/notes/fix-port-id-parameter-de4679438a891a67r.yaml
deleted file mode 100644
index 1779cd124..000000000
--- a/releasenotes/notes/fix-port-id-parameter-de4679438a891a67r.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - |
-    Pass the correct port ID parameter when calling interface create on a server.
diff --git a/releasenotes/notes/fix-recover-trigger-749600f500f7bf4a.yaml b/releasenotes/notes/fix-recover-trigger-749600f500f7bf4a.yaml
deleted file mode 100644
index f6a0718ba..000000000
--- a/releasenotes/notes/fix-recover-trigger-749600f500f7bf4a.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Fixed an error in the return value of node-check that prevented
-    node-recover from being triggered.
diff --git a/releasenotes/notes/fix-registry-claim-5421dca1ed9b0783.yaml b/releasenotes/notes/fix-registry-claim-5421dca1ed9b0783.yaml
deleted file mode 100644
index c72613c3a..000000000
--- a/releasenotes/notes/fix-registry-claim-5421dca1ed9b0783.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Fixed a problem with claiming a cluster from the health registry when the
-    service engine is stopped (killed) and restarted quickly.
diff --git a/releasenotes/notes/fix-security-group-with-same-name-887487416f4525a1.yaml b/releasenotes/notes/fix-security-group-with-same-name-887487416f4525a1.yaml
deleted file mode 100644
index d10ea46bb..000000000
--- a/releasenotes/notes/fix-security-group-with-same-name-887487416f4525a1.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - |
-    Find security groups referenced in profiles by project scope.
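The fix-security-group-with-same-name note above concerns scoping lookups by project so that same-named groups do not collide. Below is a hedged openstacksdk sketch of that lookup pattern; the cloud entry and group name are placeholders, and this is an illustration of the idea rather than Senlin's own code.

```python
# Hedged sketch: disambiguating same-named security groups by project scope
# using openstacksdk. The cloud entry and group name are placeholders.
import openstack

conn = openstack.connect(cloud="mycloud")  # assumes a configured clouds.yaml entry
groups = list(
    conn.network.security_groups(name="web", project_id=conn.current_project_id)
)
if len(groups) != 1:
    raise RuntimeError("Expected exactly one 'web' group, found %d" % len(groups))
security_group = groups[0]
```

Without the project_id filter, a group of the same name in another (visible) project could be matched instead, which is the failure mode the note addresses.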
diff --git a/releasenotes/notes/fix-tag-for-stacks-2ef70be061e80253.yaml b/releasenotes/notes/fix-tag-for-stacks-2ef70be061e80253.yaml
deleted file mode 100644
index 074e5b52f..000000000
--- a/releasenotes/notes/fix-tag-for-stacks-2ef70be061e80253.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Fixed an error in updating stack tags when the stack joins or leaves a
-    cluster.
diff --git a/releasenotes/notes/fix-tox-cover-9fc01b5e0594aa19r.yaml b/releasenotes/notes/fix-tox-cover-9fc01b5e0594aa19r.yaml
deleted file mode 100644
index bbd6723c5..000000000
--- a/releasenotes/notes/fix-tox-cover-9fc01b5e0594aa19r.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - |
-    Fixed openstack-tox-cover, which was broken as part of the switch to stestr.
diff --git a/releasenotes/notes/fix-update-lb-policy-0af6e8866f3b5543.yaml b/releasenotes/notes/fix-update-lb-policy-0af6e8866f3b5543.yaml
deleted file mode 100644
index f35e71de6..000000000
--- a/releasenotes/notes/fix-update-lb-policy-0af6e8866f3b5543.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-fixes:
-  - |
-    Updates are still allowed in a DEGRADED state, lest the LB policy become
-    unable to operate on any partially operational cluster.
-
-
diff --git a/releasenotes/notes/forbid-cluster-deletion-a8b0f55aaf0aa106.yaml b/releasenotes/notes/forbid-cluster-deletion-a8b0f55aaf0aa106.yaml
deleted file mode 100644
index a833e8560..000000000
--- a/releasenotes/notes/forbid-cluster-deletion-a8b0f55aaf0aa106.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - A cluster in the middle