diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 42ccd1a7..00000000 --- a/.coveragerc +++ /dev/null @@ -1,9 +0,0 @@ -[run] -branch = True -source = magnum -omit = magnum/tests/* - -[report] -ignore_errors = True -exclude_lines = - pass diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 9ce716b3..00000000 --- a/.gitignore +++ /dev/null @@ -1,66 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg* -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage -cover -cover-master -.tox -nosetests.xml -.testrepository -.venv - -# Functional test -functional-tests.log -functional_creds.conf - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject -.idea - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp -.*sw? -*.DS_Store - -# generated config file -etc/magnum/magnum.conf.sample - -# Files created by releasenotes build -releasenotes/build diff --git a/.gitreview b/.gitreview deleted file mode 100644 index 40295fb6..00000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/magnum.git diff --git a/.mailmap b/.mailmap deleted file mode 100644 index 516ae6fe..00000000 --- a/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -# Format is: -# <preferred e-mail> <other e-mail 1> -# <preferred e-mail> <other e-mail 2> diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index d39b6aa4..00000000 --- a/.testr.conf +++ /dev/null @@ -1,7 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-45} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./magnum/tests/unit} $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 4abf70fd..00000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,16 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps on this page: - - http://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: - - http://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/magnum diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 9a8eb8c2..00000000 --- a/HACKING.rst +++ /dev/null @@ -1,24 +0,0 @@ -Magnum Style Commandments -========================= - -- Step 1: Read the OpenStack Style Commandments - http://docs.openstack.org/developer/hacking/ -- Step 2: Read on - -Magnum Specific Commandments ----------------------------- - -- [M302] Replace assertEqual(A is not None) with the more precise - assertIsNotNone(A). -- [M310] timeutils.utcnow() wrapper must be used instead of direct calls to - datetime.datetime.utcnow() to make it easy to override its return value. -- [M316] Replace assertTrue(isinstance(A, B)) with the more precise - assertIsInstance(A, B). -- [M322] Method's default argument shouldn't be mutable. -- [M336] Must use a dict comprehension instead of a dict constructor - with a sequence of key-value pairs.
-- [M338] Use assertIn/NotIn(A, B) rather than assertEqual(A in B, True/False). -- [M339] Don't use xrange() -- [M340] Check for explicit import of the _ function. -- [M352] LOG.warn is deprecated. Enforce use of LOG.warning. -- [M353] String interpolation should be delayed at logging calls. diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a0..00000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README b/README new file mode 100644 index 00000000..8fcd2b2f --- /dev/null +++ b/README @@ -0,0 +1,14 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". 
+ +For ongoing work on maintaining OpenStack packages in the Debian +distribution, please see the Debian OpenStack packaging team at +https://wiki.debian.org/OpenStack/. + +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/README.rst b/README.rst deleted file mode 100644 index c19e572a..00000000 --- a/README.rst +++ /dev/null @@ -1,24 +0,0 @@ -======================== -Team and repository tags -======================== - -.. image:: https://governance.openstack.org/badges/magnum.svg - :target: https://governance.openstack.org/reference/tags/index.html - -.. Change things from this point on - -====== -Magnum -====== - -Magnum is an OpenStack project which offers container orchestration engines -for deploying and managing containers as first-class resources in OpenStack. - -For more information, please refer to the following resources: - -* **Free software:** under the `Apache license <http://www.apache.org/licenses/LICENSE-2.0>`_ -* **Documentation:** https://docs.openstack.org/magnum/latest/ -* **Source:** http://git.openstack.org/cgit/openstack/magnum -* **Blueprints:** https://blueprints.launchpad.net/magnum -* **Bugs:** http://bugs.launchpad.net/magnum -* **REST Client:** http://git.openstack.org/cgit/openstack/python-magnumclient diff --git a/api-ref/source/baymodels.inc b/api-ref/source/baymodels.inc deleted file mode 100644 index 86b917c1..00000000 --- a/api-ref/source/baymodels.inc +++ /dev/null @@ -1,366 +0,0 @@ -.. -*- rst -*- - -=================== - Manage Baymodels -=================== - -Lists, creates, shows details for, updates, and deletes baymodels. - -Create new baymodel ==================== - -.. rest_method:: POST /v1/baymodels/ - -Create new baymodel. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - labels: labels - - fixed_subnet: fixed_subnet - - master_flavor_id: master_flavor_id - - no_proxy: no_proxy - - https_proxy: https_proxy - - http_proxy: http_proxy - - tls_disabled: tls_disabled - - keypair_id: keypair_id - - public: public_type - - docker_volume_size: docker_volume_size - - server_type: server_type - - external_network_id: external_network_id - - image_id: image_id - - volume_driver: volume_driver - - registry_enabled: registry_enabled - - docker_storage_driver: docker_storage_driver - - name: name - - network_driver: network_driver - - fixed_network: fixed_network - - coe: coe - - flavor_id: flavor_id - - master_lb_enabled: master_lb_enabled - - dns_nameserver: dns_nameserver - -Request Example ---------------- - -.. literalinclude:: samples/baymodel-create-req.json - :language: javascript - -Response -------- - -..
rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - insecure_registry: insecure_registry - - links: links - - http_proxy: http_proxy - - updated_at: updated_at - - floating_ip_enabled: floating_ip_enabled - - fixed_subnet: fixed_subnet - - master_flavor_id: master_flavor_id - - uuid: baymodel_id - - no_proxy: no_proxy - - https_proxy: https_proxy - - tls_disabled: tls_disabled - - keypair_id: keypair_id - - public: public_type - - labels: labels - - docker_volume_size: docker_volume_size - - server_type: server_type - - external_network_id: external_network_id - - cluster_distro: cluster_distro - - image_id: image_id - - volume_driver: volume_driver - - registry_enabled: registry_enabled - - docker_storage_driver: docker_storage_driver - - apiserver_port: apiserver_port - - name: name - - created_at: created_at - - network_driver: network_driver - - fixed_network: fixed_network - - coe: coe - - flavor_id: flavor_id - - master_lb_enabled: master_lb_enabled - - dns_nameserver: dns_nameserver - -Response Example ----------------- - -.. literalinclude:: samples/baymodel-create-resp.json - :language: javascript - -List all baymodels -================== - -.. rest_method:: GET /v1/baymodels/ - -List all available baymodels in Magnum. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - baymodels: baymodel_list - - insecure_registry: insecure_registry - - links: links - - http_proxy: http_proxy - - updated_at: updated_at - - floating_ip_enabled: floating_ip_enabled - - fixed_subnet: fixed_subnet - - master_flavor_id: master_flavor_id - - uuid: baymodel_id - - no_proxy: no_proxy - - https_proxy: https_proxy - - tls_disabled: tls_disabled - - keypair_id: keypair_id - - public: public_type - - labels: labels - - docker_volume_size: docker_volume_size - - server_type: server_type - - external_network_id: external_network_id - - cluster_distro: cluster_distro - - image_id: image_id - - volume_driver: volume_driver - - registry_enabled: registry_enabled - - docker_storage_driver: docker_storage_driver - - apiserver_port: apiserver_port - - name: name - - created_at: created_at - - network_driver: network_driver - - fixed_network: fixed_network - - coe: coe - - flavor_id: flavor_id - - master_lb_enabled: master_lb_enabled - - dns_nameserver: dns_nameserver - -Response Example ----------------- - -.. literalinclude:: samples/baymodel-get-all-resp.json - :language: javascript - -Show details of a baymodel -========================== - -.. rest_method:: GET /v1/baymodels/{baymodel_ident} - -Get all information of a baymodel in Magnum. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - baymodel_ident: baymodel_ident - -Response --------- - -.. 
rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - baymodels: baymodel_list - - insecure_registry: insecure_registry - - links: links - - http_proxy: http_proxy - - updated_at: updated_at - - floating_ip_enabled: floating_ip_enabled - - fixed_subnet: fixed_subnet - - master_flavor_id: master_flavor_id - - uuid: baymodel_id - - no_proxy: no_proxy - - https_proxy: https_proxy - - tls_disabled: tls_disabled - - keypair_id: keypair_id - - public: public_type - - labels: labels - - docker_volume_size: docker_volume_size - - server_type: server_type - - external_network_id: external_network_id - - cluster_distro: cluster_distro - - image_id: image_id - - volume_driver: volume_driver - - registry_enabled: registry_enabled - - docker_storage_driver: docker_storage_driver - - apiserver_port: apiserver_port - - name: name - - created_at: created_at - - network_driver: network_driver - - fixed_network: fixed_network - - coe: coe - - flavor_id: flavor_id - - master_lb_enabled: master_lb_enabled - - dns_nameserver: dns_nameserver - -Response Example ---------------- - -.. literalinclude:: samples/baymodel-create-resp.json - :language: javascript - -Delete a baymodel ================== - -.. rest_method:: DELETE /v1/baymodels/{baymodel_ident} - -Delete a baymodel. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 204 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 409 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - baymodel_ident: baymodel_ident - -Response -------- - -This request does not return anything in the response body. - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - -Update information of baymodel =============================== - -.. rest_method:: PATCH /v1/baymodels/{baymodel_ident} - -Update the attributes of a baymodel using the operations ``add``, -``replace`` or ``remove``. The attributes to ``add`` and ``replace`` are given -in the form of ``key=value``, while ``remove`` only needs the keys. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - baymodel_ident: baymodel_ident - - path: path - - value: value - - op: op - -Request Example ---------------- - -.. literalinclude:: samples/baymodel-update-req.json - :language: javascript - -Response -------- - -Returns the baymodel with updated attributes. - -..
rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - baymodels: baymodel_list - - insecure_registry: insecure_registry - - links: links - - http_proxy: http_proxy - - updated_at: updated_at - - floating_ip_enabled: floating_ip_enabled - - fixed_subnet: fixed_subnet - - master_flavor_id: master_flavor_id - - uuid: baymodel_id - - no_proxy: no_proxy - - https_proxy: https_proxy - - tls_disabled: tls_disabled - - keypair_id: keypair_id - - public: public_type - - labels: labels - - docker_volume_size: docker_volume_size - - server_type: server_type - - external_network_id: external_network_id - - cluster_distro: cluster_distro - - image_id: image_id - - volume_driver: volume_driver - - registry_enabled: registry_enabled - - docker_storage_driver: docker_storage_driver - - apiserver_port: apiserver_port - - name: name - - created_at: created_at - - network_driver: network_driver - - fixed_network: fixed_network - - coe: coe - - flavor_id: flavor_id - - master_lb_enabled: master_lb_enabled - - dns_nameserver: dns_nameserver - -Response Example ----------------- - -.. literalinclude:: samples/baymodel-create-resp.json - :language: javascript \ No newline at end of file diff --git a/api-ref/source/bays.inc b/api-ref/source/bays.inc deleted file mode 100644 index f17a8a46..00000000 --- a/api-ref/source/bays.inc +++ /dev/null @@ -1,259 +0,0 @@ -.. -*- rst -*- - -============ - Manage Bay -============ - -Lists, creates, shows details for, updates, and deletes Bay. - -Create new bay -============== - -.. rest_method:: POST /v1/bays - -Create new bay based on bay model. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - name: name - - discovery_url: discovery_url - - master_count: master_count - - baymodel_id: baymodel_id - - node_count: node_count - - bay_create_timeout: bay_create_timeout - -.. note:: - - Request for creating bay is asynchronous from Newton. - -Request Example ----------------- - -.. literalinclude:: samples/bay-create-req.json - :language: javascript - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - uuid: bay_id - -Response Example ----------------- - -.. literalinclude:: samples/bay-create-resp.json - :language: javascript - -List all bays -==================== - -.. rest_method:: GET /v1/bays/ - -List all bays in Magnum. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - bays: bay_list - - status: status - - uuid: bay_id - - links: links - - stack_id: stack_id - - master_count: master_count - - baymodel_id: baymodel_id - - node_count: node_count - - bay_create_timeout: bay_create_timeout - - name: name - -Response Example ----------------- - -.. literalinclude:: samples/bay-get-all-resp.json - :language: javascript - -Show details of a bay -============================= - -.. rest_method:: GET /v1/bays/{bay_ident} - -Get all information of a bay in Magnum. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - bay_ident: bay_ident - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - status: status - - uuid: bay_id - - links: links - - stack_id: stack_id - - created_at: created_at - - api_address: api_address - - discovery_url: discovery_url - - updated_at: updated_at - - master_count: master_count - - coe_version: coe_version - - baymodel_id: baymodel_id - - master_addresses: master_addresses - - node_count: node_count - - node_addresses: node_addresses - - status_reason: status_reason - - bay_create_timeout: bay_create_timeout - - name: name - -Response Example ---------------- - -.. literalinclude:: samples/bay-get-one-resp.json - :language: javascript - -Delete a bay ==================== - -.. rest_method:: DELETE /v1/bays/{bay_ident} - -Delete a bay. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 204 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 409 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - bay_ident: bay_ident - -Response -------- - -This request does not return anything in the response body. - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - -Update information of bay ================================= - -.. rest_method:: PATCH /v1/bays/{bay_ident} - -Update the attributes of a bay using the operations ``add``, -``replace`` or ``remove``. The attributes to ``add`` and ``replace`` are given -in the form of ``key=value``, while ``remove`` only needs the keys. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - bay_ident: bay_ident - - path: path - - value: value - - op: op - -.. note:: - - Request for updating bay is asynchronous from Newton. - Currently only the ``node_count`` attribute is supported for the - ``replace`` and ``remove`` operations. - -Request Example ---------------- - -.. literalinclude:: samples/bay-update-req.json - :language: javascript - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - uuid: bay_id - -Response Example ---------------- - -.. literalinclude:: samples/bay-create-resp.json - :language: javascript diff --git a/api-ref/source/certificates.inc b/api-ref/source/certificates.inc deleted file mode 100644 index aff0e244..00000000 --- a/api-ref/source/certificates.inc +++ /dev/null @@ -1,147 +0,0 @@ -.. -*- rst -*- - -===================================== - Manage certificates for bay/cluster -===================================== - -Generates and shows CA certificates for a bay/cluster. - -Show details about the CA certificate for a bay/cluster ======================================================= - -.. rest_method:: GET /v1/certificates/{bay_uuid/cluster_uuid} - -Show CA certificate details that are associated with the created bay/cluster. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - bay_uuid: bay_id - -.. note:: - - After Newton, all terms related to bay/baymodel will be renamed to cluster - and cluster template. - -Response -------- - -..
rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - cluster_uuid: cluster_id - - pem: pem - - bay_uuid: bay_id - - links: links - -.. note:: - - After Newton, all terms related to bay/baymodel will be renamed to cluster - and cluster template. - -Response Example ---------------- - -.. literalinclude:: samples/certificates-ca-show-resp.json - :language: javascript - -Generate the CA certificate for a bay/cluster ============================================= - -.. rest_method:: POST /v1/certificates/ - -Sign a client key and generate the CA certificate for a bay/cluster. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - bay_uuid: bay_id - - csr: csr - -.. note:: - - After Newton, all terms related to bay/baymodel will be renamed to cluster - and cluster template. - -Request Example ---------------- - -.. literalinclude:: samples/certificates-ca-sign-req.json - :language: javascript - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - pem: pem - - bay_uuid: bay_id - - links: links - - csr: csr - -.. note:: - - After Newton, all terms related to bay/baymodel will be renamed to cluster - and cluster template. - -Response Example ---------------- - -.. literalinclude:: samples/certificates-ca-sign-resp.json - :language: javascript - -Rotate the CA certificate for a bay/cluster =========================================== - -.. rest_method:: PATCH /v1/certificates/{bay_uuid/cluster_uuid} - -Rotate the CA certificate for a bay/cluster and invalidate all user -certificates. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - cluster: cluster_id diff --git a/api-ref/source/clusters.inc b/api-ref/source/clusters.inc deleted file mode 100644 index 7a826052..00000000 --- a/api-ref/source/clusters.inc +++ /dev/null @@ -1,262 +0,0 @@ -.. -*- rst -*- - -================ - Manage Cluster -================ - -Lists, creates, shows details for, updates, and deletes Cluster. - -Create new cluster ================== - -.. rest_method:: POST /v1/clusters - -Create new cluster based on cluster template. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - name: name - - discovery_url: discovery_url - - master_count: master_count - - cluster_template_id: clustertemplate_id - - node_count: node_count - - create_timeout: create_timeout - - keypair: keypair_id - -.. note:: - - Request for creating cluster is asynchronous from Newton. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-create-req.json - :language: javascript - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - uuid: cluster_id - -Response Example ---------------- - -.. literalinclude:: samples/cluster-create-resp.json - :language: javascript - -List all clusters ================= - -.. rest_method:: GET /v1/clusters - -List all clusters in Magnum. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -..
rest_status_code:: error status.yaml - - - 401 - - 403 - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - clusters: cluster_list - - status: status - - uuid: cluster_id - - links: links - - stack_id: stack_id - - keypair: keypair_id - - master_count: master_count - - cluster_template_id: clustertemplate_id - - node_count: node_count - - create_timeout: create_timeout - - name: name - -Response Example ---------------- - -.. literalinclude:: samples/cluster-get-all-resp.json - :language: javascript - -Show details of a cluster ========================= - -.. rest_method:: GET /v1/clusters/{cluster_ident} - -Get all information of a cluster in Magnum. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - cluster_ident: cluster_ident - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - status: status - - uuid: cluster_id - - links: links - - stack_id: stack_id - - created_at: created_at - - api_address: api_address - - discovery_url: discovery_url - - updated_at: updated_at - - master_count: master_count - - coe_version: coe_version - - keypair: keypair_id - - cluster_template_id: clustertemplate_id - - master_addresses: master_addresses - - node_count: node_count - - node_addresses: node_addresses - - status_reason: status_reason - - create_timeout: create_timeout - - name: name - -Response Example ---------------- - -.. literalinclude:: samples/cluster-get-one-resp.json - :language: javascript - -Delete a cluster ==================== - -.. rest_method:: DELETE /v1/clusters/{cluster_ident} - -Delete a cluster. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 204 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 409 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - cluster_ident: cluster_ident - -Response -------- - -This request does not return anything in the response body. - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - -Update information of cluster ============================= - -.. rest_method:: PATCH /v1/clusters/{cluster_ident} - -Update the attributes of a cluster using the operations ``add``, -``replace`` or ``remove``. The attributes to ``add`` and ``replace`` are given -in the form of ``key=value``, while ``remove`` only needs the keys. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - cluster_ident: cluster_ident - - path: path - - value: value - - op: op - -.. note:: - - Request for updating cluster is asynchronous from Newton. - Currently only the ``node_count`` attribute is supported for the - ``replace`` and ``remove`` operations. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-update-req.json - :language: javascript - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - uuid: cluster_id - -Response Example ---------------- - -..
literalinclude:: samples/cluster-create-resp.json - :language: javascript diff --git a/api-ref/source/clustertemplates.inc b/api-ref/source/clustertemplates.inc deleted file mode 100644 index c068253d..00000000 --- a/api-ref/source/clustertemplates.inc +++ /dev/null @@ -1,366 +0,0 @@ -.. -*- rst -*- - -========================== - Manage Cluster Templates -========================== - -Lists, creates, shows details for, updates, and deletes Cluster Templates. - -Create new cluster template -===================================== - -.. rest_method:: POST /v1/clustertemplates - -Create new cluster template. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - labels: labels - - fixed_subnet: fixed_subnet - - master_flavor_id: master_flavor_id - - no_proxy: no_proxy - - https_proxy: https_proxy - - http_proxy: http_proxy - - tls_disabled: tls_disabled - - keypair_id: keypair_id - - public: public_type - - docker_volume_size: docker_volume_size - - server_type: server_type - - external_network_id: external_network_id - - image_id: image_id - - volume_driver: volume_driver - - registry_enabled: registry_enabled - - docker_storage_driver: docker_storage_driver - - name: name - - network_driver: network_driver - - fixed_network: fixed_network - - coe: coe - - flavor_id: flavor_id - - master_lb_enabled: master_lb_enabled - - dns_nameserver: dns_nameserver - -Request Example ----------------- - -.. literalinclude:: samples/clustertemplate-create-req.json - :language: javascript - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - insecure_registry: insecure_registry - - links: links - - http_proxy: http_proxy - - updated_at: updated_at - - floating_ip_enabled: floating_ip_enabled - - fixed_subnet: fixed_subnet - - master_flavor_id: master_flavor_id - - uuid: clustertemplate_id - - no_proxy: no_proxy - - https_proxy: https_proxy - - tls_disabled: tls_disabled - - keypair_id: keypair_id - - public: public_type - - labels: labels - - docker_volume_size: docker_volume_size - - server_type: server_type - - external_network_id: external_network_id - - cluster_distro: cluster_distro - - image_id: image_id - - volume_driver: volume_driver - - registry_enabled: registry_enabled - - docker_storage_driver: docker_storage_driver - - apiserver_port: apiserver_port - - name: name - - created_at: created_at - - network_driver: network_driver - - fixed_network: fixed_network - - coe: coe - - flavor_id: flavor_id - - master_lb_enabled: master_lb_enabled - - dns_nameserver: dns_nameserver - -Response Example ----------------- - -.. literalinclude:: samples/clustertemplate-create-resp.json - :language: javascript - -List all cluster templates -========================== - -.. rest_method:: GET /v1/clustertemplates - -List all available cluster templates in Magnum. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Response --------- - -.. 
rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - clustertemplates: clustertemplate_list - - insecure_registry: insecure_registry - - links: links - - http_proxy: http_proxy - - updated_at: updated_at - - floating_ip_enabled: floating_ip_enabled - - fixed_subnet: fixed_subnet - - master_flavor_id: master_flavor_id - - uuid: clustertemplate_id - - no_proxy: no_proxy - - https_proxy: https_proxy - - tls_disabled: tls_disabled - - keypair_id: keypair_id - - public: public_type - - labels: labels - - docker_volume_size: docker_volume_size - - server_type: server_type - - external_network_id: external_network_id - - cluster_distro: cluster_distro - - image_id: image_id - - volume_driver: volume_driver - - registry_enabled: registry_enabled - - docker_storage_driver: docker_storage_driver - - apiserver_port: apiserver_port - - name: name - - created_at: created_at - - network_driver: network_driver - - fixed_network: fixed_network - - coe: coe - - flavor_id: flavor_id - - master_lb_enabled: master_lb_enabled - - dns_nameserver: dns_nameserver - -Response Example ----------------- - -.. literalinclude:: samples/clustertemplate-get-all-resp.json - :language: javascript - -Show details of a cluster template -================================== - -.. rest_method:: GET /v1/clustertemplates/{clustertemplate_ident} - -Get all information of a cluster template in Magnum. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - clustertemplate_ident: clustertemplate_ident - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - clustertemplates: clustertemplate_list - - insecure_registry: insecure_registry - - links: links - - http_proxy: http_proxy - - updated_at: updated_at - - floating_ip_enabled: floating_ip_enabled - - fixed_subnet: fixed_subnet - - master_flavor_id: master_flavor_id - - uuid: clustertemplate_id - - no_proxy: no_proxy - - https_proxy: https_proxy - - tls_disabled: tls_disabled - - keypair_id: keypair_id - - public: public_type - - labels: labels - - docker_volume_size: docker_volume_size - - server_type: server_type - - external_network_id: external_network_id - - cluster_distro: cluster_distro - - image_id: image_id - - volume_driver: volume_driver - - registry_enabled: registry_enabled - - docker_storage_driver: docker_storage_driver - - apiserver_port: apiserver_port - - name: name - - created_at: created_at - - network_driver: network_driver - - fixed_network: fixed_network - - coe: coe - - flavor_id: flavor_id - - master_lb_enabled: master_lb_enabled - - dns_nameserver: dns_nameserver - -Response Example ----------------- - -.. literalinclude:: samples/clustertemplate-create-resp.json - :language: javascript - -Delete a cluster template -========================= - -.. rest_method:: DELETE /v1/clustertemplates/{clustertemplate_ident} - -Delete a cluster template. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 204 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 409 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - clustertemplate_ident: clustertemplate_ident - -Response --------- - -This request does not return anything in the response body. - -.. 
rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - -Update information of cluster template ================================================ - -.. rest_method:: PATCH /v1/clustertemplates/{clustertemplate_ident} - -Update the attributes of a cluster template using the operations ``add``, -``replace`` or ``remove``. The attributes to ``add`` and ``replace`` are given -in the form of ``key=value``, while ``remove`` only needs the keys. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - clustertemplate_ident: clustertemplate_ident - - path: path - - value: value - - op: op - -Request Example ---------------- - -.. literalinclude:: samples/clustertemplate-update-req.json - :language: javascript - -Response -------- - -Returns the cluster template with updated attributes. - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - clustertemplates: clustertemplate_list - - insecure_registry: insecure_registry - - links: links - - http_proxy: http_proxy - - updated_at: updated_at - - floating_ip_enabled: floating_ip_enabled - - fixed_subnet: fixed_subnet - - master_flavor_id: master_flavor_id - - uuid: clustertemplate_id - - no_proxy: no_proxy - - https_proxy: https_proxy - - tls_disabled: tls_disabled - - keypair_id: keypair_id - - public: public_type - - labels: labels - - docker_volume_size: docker_volume_size - - server_type: server_type - - external_network_id: external_network_id - - cluster_distro: cluster_distro - - image_id: image_id - - volume_driver: volume_driver - - registry_enabled: registry_enabled - - docker_storage_driver: docker_storage_driver - - apiserver_port: apiserver_port - - name: name - - created_at: created_at - - network_driver: network_driver - - fixed_network: fixed_network - - coe: coe - - flavor_id: flavor_id - - master_lb_enabled: master_lb_enabled - - dns_nameserver: dns_nameserver - -Response Example ---------------- - -.. literalinclude:: samples/clustertemplate-create-resp.json - :language: javascript \ No newline at end of file diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py deleted file mode 100644 index 4e1d4495..00000000 --- a/api-ref/source/conf.py +++ /dev/null @@ -1,237 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Magnum documentation build configuration file -# -# This file is execfile()d with the current directory set to -# its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default.
- -import os -import subprocess -import sys -import warnings - -extensions = [ - 'os_api_ref', -] - -import openstackdocstheme # noqa - -html_theme = 'openstackdocs' -html_theme_path = [openstackdocstheme.get_html_theme_path()] -html_theme_options = { - "sidebar_mode": "toc", -} - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Container Infrastructure Management API Reference' -copyright = u'2010-present, OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -from magnum.version import version_info -# The full version, including alpha/beta/rc tags. -release = version_info.release_string() -# The short X.Y version. -version = version_info.version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# The reST default role (used for this markup: `text`) to use -# for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# Config logABug feature -# source tree -giturl = ( - u'https://git.openstack.org/cgit/openstack/magnum/tree/api-ref/source') -# html_context allows us to pass arbitrary values into the html template -html_context = {'bug_tag': 'api-ref', - 'giturl': giturl, - 'bug_project': 'magnum'} - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. 
-# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# "<project> v<release> documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' -git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", - "-n1"] -try: - html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8') -except Exception: - warnings.warn('Cannot get last updated time from git repository. ' - 'Not setting "html_last_updated_fmt".') - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a <link> tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'magnumdoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'Magnum.tex', - u'OpenStack Container Infrastructure Management API Documentation', - u'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated.
-# latex_use_modindex = True diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst deleted file mode 100644 index 9c829ec1..00000000 --- a/api-ref/source/index.rst +++ /dev/null @@ -1,18 +0,0 @@ -:tocdepth: 2 - -======================================== - Container Infrastructure Management API ======================================== - -.. rest_expand_all:: - -.. include:: versions.inc -.. include:: urls.inc -.. include:: bays.inc -.. include:: baymodels.inc -.. include:: clusters.inc -.. include:: clustertemplates.inc -.. include:: certificates.inc -.. include:: mservices.inc -.. include:: stats.inc -.. include:: quotas.inc diff --git a/api-ref/source/mservices.inc b/api-ref/source/mservices.inc deleted file mode 100644 index 208e2f1e..00000000 --- a/api-ref/source/mservices.inc +++ /dev/null @@ -1,49 +0,0 @@ -.. -*- rst -*- - -===================== -Manage Magnum service ===================== - -List container infrastructure management services ======================================================= - -.. rest_method:: GET /v1/mservices - -Enables administrative users to list all Magnum services. - -Container infrastructure service information includes the service id, binary, -host, report count, creation time, last updated time, health status, and -the reason for disabling the service. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - mservices: mservices - - binary: binary - - created_at: created_at - - state: state - - report_count: report_count - - updated_at: updated_at - - host: host - - disabled_reason: disabled_reason - - id: id_s - -Response Example ---------------- - -.. literalinclude:: samples/mservice-get-resp.json - :language: javascript diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml deleted file mode 100644 index 0d575063..00000000 --- a/api-ref/source/parameters.yaml +++ /dev/null @@ -1,597 +0,0 @@ -# Header params -request_id: - type: UUID - in: header - required: true - description: | - A unique ID for tracking the service request. The request ID associated - with the request by default appears in the service logs. - -# Path params -bay_ident: - type: string - in: path - required: true - description: | - The UUID or name of bays in Magnum. -baymodel_ident: - description: | - The UUID or name of baymodels in Magnum. - in: path - required: true - type: string -cluster_ident: - type: string - in: path - required: true - description: | - The UUID or name of clusters in Magnum. -clustertemplate_ident: - type: string - in: path - required: true - description: | - The UUID or name of cluster templates in Magnum. -project_id: - type: string - in: path - required: true - description: | - Project ID. - -# Body params -api_address: - description: | - The endpoint URL of COE API exposed to end-users. - in: body - format: uri - required: true - type: string -apiserver_port: - type: integer - in: body - required: true - description: | - The exposed port of COE API server. -bay_create_timeout: - type: integer - in: body - required: true - description: | - The timeout for bay creation in minutes. The value expected is a - positive integer and the default is 60 minutes. If the timeout is reached - during the bay creation process, the operation will be aborted and the - bay status will be set to ``CREATE_FAILED``.
-bay_id: - type: UUID - in: body - required: true - description: | - The UUID of the bay. -bay_list: - type: array - in: body - required: true - description: | - The list of all bays in Magnum. -baymodel_id: - type: UUID - in: body - required: true - description: | - The UUID of the baymodel. -baymodel_list: - type: array - in: body - required: true - description: | - The list of all baymodels in Magnum. -binary: - type: string - in: body - required: true - description: | - The name of the binary form of the Magnum service. -cluster_distro: - type: string - in: body - required: true - description: | - Display the attribute ``os-distro`` defined as appropriate metadata in - the image for the bay/cluster driver. -cluster_id: - type: UUID - in: body - required: true - description: | - The UUID of the cluster. -cluster_list: - type: array - in: body - required: true - description: | - The list of all clusters in Magnum. -clusters: - type: integer - in: body - required: true - description: | - The number of clusters. -clustertemplate_id: - type: UUID - in: body - required: true - description: | - The UUID of the cluster template. -clustertemplate_list: - type: array - in: body - required: true - description: | - The list of all cluster templates in Magnum. -coe: - type: string - in: body - required: true - description: | - Specify the Container Orchestration Engine to use. Supported COEs - include ``kubernetes``, ``swarm``, ``mesos``. If your environment has - additional bay/cluster drivers installed, refer to the bay/cluster driver - documentation for the new COE names. -coe_version: - type: string - in: body - required: true - description: | - Version info of the chosen COE in the bay/cluster, to help the client - pick the right client version. -create_timeout: - type: integer - in: body - required: true - description: | - The timeout for cluster creation in minutes. The value expected is a - positive integer and the default is 60 minutes. If the timeout is reached - during the cluster creation process, the operation will be aborted and the - cluster status will be set to ``CREATE_FAILED``. -created_at: - description: | - The date and time when the resource was created. - - The date and time stamp format is `ISO 8601 - <https://en.wikipedia.org/wiki/ISO_8601>`_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. - in: body - required: true - type: string -csr: - description: | - Certificate Signing Request (CSR) for authenticating client key. - - The CSR will be used by Magnum to generate a signed certificate - that the client will use to communicate with the Bay/Cluster. - in: body - required: true - type: string -description: - description: | - Descriptive text about the Magnum service. - in: body - required: true - type: string -disabled_reason: - description: | - The disable reason of the service, ``null`` if the service is enabled or - disabled without a reason provided. - in: body - required: true - type: string -discovery_url: - description: | - The custom discovery url for node discovery. This is used by the COE to - discover the servers that have been created to host the containers. The - actual discovery mechanism varies with the COE. In some cases, Magnum fills - in the server info in the discovery service.
In other cases, if the - ``discovery_url`` is not specified, Magnum will use the public discovery - service at: - - :: - - https://discovery.etcd.io - - In this case, Magnum will generate a unique url for each bay and - store the info for the servers. - in: body - format: uri - required: true - type: string -dns_nameserver: - description: | - The DNS nameserver for the servers and containers in the bay/cluster to - use. This is configured in the private Neutron network for the bay/cluster. - The default is ``8.8.8.8``. - in: body - required: true - type: string -docker_storage_driver: - description: | - The name of a driver to manage the storage for the images and the - container's writable layer. The supported drivers are ``devicemapper`` and - ``overlay``. The default is ``devicemapper``. - in: body - required: true - type: string -docker_volume_size: - description: | - The size in GB for the local storage on each server for the Docker daemon - to cache the images and host the containers. Cinder volumes provide the - storage. The default is 25 GB. For the ``devicemapper`` storage driver, - the minimum value is 3 GB. For the ``overlay`` storage driver, the minimum - value is 1 GB. - in: body - required: true - type: integer -external_network_id: - description: | - The name or network ID of a Neutron network to provide connectivity to the - external internet for the bay/cluster. This network must be an external - network, i.e. its attribute ``router:external`` must be ``True``. The - servers in the bay/cluster will be connected to a private network and - Magnum will create a router between this private network and the external - network. This will allow the servers to download images, access the - discovery service, etc., and the containers to install packages, etc. In - the opposite - direction, floating IPs will be allocated from the external network to - provide access from the external internet to servers and the container - services hosted in the bay/cluster. - in: body - required: true - type: string -fixed_network: - description: | - The name or network ID of a Neutron network to provide connectivity to - the internal network for the bay/cluster. - in: body - required: false - type: string -fixed_subnet: - description: | - The fixed subnet used to allocate network addresses for nodes in the - bay/cluster. - in: body - required: false - type: string -flavor_id: - description: | - The nova flavor ID or name for booting the node servers. The default is - ``m1.small``. - in: body - required: true - type: string -floating_ip_enabled: - description: | - Whether or not to use a floating IP from the cloud provider. Some - cloud providers use floating IPs while others use public IPs, so Magnum - provides this option to specify the choice. - in: body - required: true - type: boolean -host: - description: | - The host for the service. - in: body - required: true - type: string -http_proxy: - description: | - The IP address for a proxy to use when direct http access from the servers - to sites on the external internet is blocked. This may happen in certain - countries or enterprises, and the proxy allows the servers and - containers to access these sites. The format is a URL including a port - number. The default is ``None``. - in: body - required: false - type: string -https_proxy: - description: | - The IP address for a proxy to use when direct https access from the - servers to sites on the external internet is blocked.
This may happen in - certain countries or enterprises, and the proxy allows the servers and - containers to access these sites. The format is a URL including a port - number. The default is ``None``. - in: body - required: false - type: string -id_s: - description: | - The ID of the Magnum service. - in: body - required: true - type: string -image_id: - description: | - The name or UUID of the base image in Glance to boot the servers for the - bay/cluster. The image must have the attribute ``os-distro`` defined as - appropriate for the bay/cluster driver. - in: body - required: true - type: string -insecure_registry: - description: | - The URL pointing to the user's own private insecure Docker registry used - to deploy and run Docker containers. - in: body - required: true - type: string -keypair_id: - description: | - The name of the SSH keypair to configure in the bay/cluster servers - for ssh access. Users will need the key to be able to ssh to the servers in - the bay/cluster. The login name is specific to the bay/cluster driver; for - example, with the fedora-atomic image the default login name is ``fedora``. - in: body - required: true - type: string -labels: - description: | - Arbitrary labels in the form of ``key=value`` pairs. The accepted keys and - valid values are defined in the bay/cluster drivers. They are used as a way - to pass additional parameters that are specific to a bay/cluster driver. - in: body - required: false - type: array -links: - description: | - Links to the resources in question. - in: body - required: true - type: array -master_addresses: - description: | - The list of floating IPs of all master nodes. - in: body - required: true - type: array -master_count: - description: | - The number of servers that will serve as master for the bay/cluster. The - default is 1. Set to more than 1 master to enable High Availability. If - the option ``master-lb-enabled`` is specified in the baymodel/cluster - template, the master servers will be placed in a load balancer pool. - in: body - required: true - type: integer -master_flavor_id: - description: | - The flavor of the master node for this baymodel/cluster template. - in: body - required: false - type: string -master_lb_enabled: - description: | - Since multiple masters may exist in a bay/cluster, a Neutron load balancer - is created to provide the API endpoint for the bay/cluster and to direct - requests to the masters. In some cases, such as when the LBaaS service is - not available, this option can be set to ``false`` to create a bay/cluster - without the load balancer. In this case, one of the masters will serve as - the API endpoint. The default is ``true``, i.e. to create the load - balancer for the bay. - in: body - required: true - type: boolean -mservices: - description: | - A list of Magnum services. - in: body - required: true - type: array -name: - description: | - Name of the resource. - in: body - required: true - type: string -network_driver: - description: | - The name of a network driver for providing the networks for the containers. - Note that this is different and separate from the Neutron network for the - bay/cluster. The operation and networking model are specific to the - particular driver. - in: body - required: true - type: string -no_proxy: - description: | - When a proxy server is used, some sites should not go through the proxy - and should be accessed normally. In this case, users can specify these - sites as a comma-separated list of IPs. The default is ``None``.
- in: body - required: false - type: string -node_addresses: - description: | - The list of floating IPs of all servers that serve as nodes. - in: body - required: true - type: array -node_count: - description: | - The number of servers that will serve as nodes in the bay/cluster. The - default is 1. - in: body - required: true - type: integer -nodes: - description: | - The total number of nodes including master nodes. - in: body - required: true - type: integer -op: - description: | - The operation used to modify a resource's attributes. The supported - operations are: ``add``, ``replace`` and ``remove``. In the case of - ``remove``, users only need to provide the ``path`` of the attribute to - delete. - in: body - required: true - type: string -path: - description: | - Resource attribute's name. - in: body - required: true - type: string -pem: - description: | - CA certificate for the bay/cluster. - in: body - required: true - type: string -public_type: - description: | - Access to a baymodel/cluster template is normally limited to the admin, - owner or users within the same tenant as the owners. Setting this flag - makes the baymodel/cluster template public and accessible by other users. - The default is not public. - in: body - required: true - type: boolean -registry_enabled: - description: | - Docker images by default are pulled from the public Docker registry, - but in some cases, users may want to use a private registry. This option - provides an alternative registry based on the Registry V2: Magnum will - create a local registry in the bay/cluster backed by Swift to host the - images. The default is to use the public registry. - in: body - required: false - type: boolean -report_count: - description: | - The total number of reports. - in: body - required: true - type: integer -server_type: - description: | - The servers in the bay/cluster can be ``vm`` or ``baremetal``. This - parameter selects the type of server to create for the bay/cluster. - The default is ``vm``. - in: body - required: true - type: string -stack_id: - description: | - The reference UUID of the orchestration stack from the Heat orchestration - service. - in: body - required: true - type: UUID -state: - description: | - The current state of Magnum services. - in: body - required: true - type: string -status: - description: | - The current state of the bay/cluster. - in: body - required: true - type: string -status_reason: - description: | - The reason for the bay/cluster's current status. - in: body - required: true - type: string -tls_disabled: - description: | - Transport Layer Security (TLS) is normally enabled to secure the - bay/cluster. In some cases, users may want to disable TLS in the - bay/cluster, for instance during development or to troubleshoot certain - problems. Specifying this parameter will disable TLS so that users can - access the COE endpoints without a certificate. The default is TLS enabled. - in: body - required: true - type: boolean -updated_at: - description: | - The date and time when the resource was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. In the previous example, the offset value is ``-05:00``. - - If the ``updated_at`` date and time stamp is not set, its value is - ``null``. - in: body - required: true - type: string -value: - description: | - Resource attribute's value.
- in: body - required: true - type: string -version: - description: | - The version. - in: body - required: true - type: string -version_id: - type: string - in: body - required: true - description: > - A common name for the version in question. Informative only, it - has no real semantic meaning. -version_max: - type: string - in: body - required: true - description: > - If this version of the API supports microversions, the maximum - microversion that is supported. This will be the empty string if - microversions are not supported. -version_min: - type: string - in: body - required: true - description: > - If this version of the API supports microversions, the minimum - microversion that is supported. This will be the empty string if - microversions are not supported. -version_status: - type: string - in: body - required: true - description: | - The status of this API version. This can be one of: - - - ``CURRENT``: this is the preferred version of the API to use - - ``SUPPORTED``: this is an older, but still supported version of the API - - ``DEPRECATED``: a deprecated version of the API that is slated for removal -volume_driver: - type: string - in: body - required: true - description: > - The name of a volume driver for managing the persistent storage for - the containers. The supported functionality is specific to the driver. diff --git a/api-ref/source/quotas.inc b/api-ref/source/quotas.inc deleted file mode 100755 index 58495003..00000000 --- a/api-ref/source/quotas.inc +++ /dev/null @@ -1,151 +0,0 @@ -.. -*- rst -*- - -================= - Magnum Quota API -================= - -Lists, creates, shows details, and updates quotas. - -Set new quota ================== - -.. rest_method:: POST /v1/quotas - -Create a new quota for a project. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request Example ---------------- - -.. literalinclude:: samples/quota-create-req.json - :language: javascript - -Response Example ---------------- - -.. literalinclude:: samples/quota-create-resp.json - :language: javascript - -List all quotas ================ - -.. rest_method:: GET /v1/quotas - -List all quotas in Magnum. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Response Example ---------------- - -.. literalinclude:: samples/quota-get-all-resp.json - :language: javascript - -Show details of a quota ========================= - -.. rest_method:: GET /v1/quotas/{project_id}/{resource} - -Get quota information for the given project_id and resource. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - -Response Example ---------------- - -.. literalinclude:: samples/quota-get-one-resp.json - :language: javascript - -Update a resource quota ============================= - -.. rest_method:: PATCH /v1/quotas/{project_id}/{resource} - -Update the resource quota for the given project ID. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request Example ---------------- - -.. literalinclude:: samples/quota-update-req.json - :language: javascript - -Response Example ---------------- - -..
literalinclude:: samples/quota-update-resp.json - :language: javascript - -Delete a resource quota -============================ - -.. rest_method:: DELETE /v1/quotas/{project_id}/{resource} - -Delete a resource quota for the given project id. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 204 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request Example ----------------- - -.. literalinclude:: samples/quota-delete-req.json - :language: javascript \ No newline at end of file diff --git a/api-ref/source/samples/bay-create-req.json b/api-ref/source/samples/bay-create-req.json deleted file mode 100644 index bb3dd04a..00000000 --- a/api-ref/source/samples/bay-create-req.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "name":"k8s", - "discovery_url":null, - "master_count":2, - "baymodel_id":"0562d357-8641-4759-8fed-8173f02c9633", - "node_count":2, - "bay_create_timeout":60 -} \ No newline at end of file diff --git a/api-ref/source/samples/bay-create-resp.json b/api-ref/source/samples/bay-create-resp.json deleted file mode 100644 index 99d440d9..00000000 --- a/api-ref/source/samples/bay-create-resp.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "uuid":"746e779a-751a-456b-a3e9-c883d734946f" -} \ No newline at end of file diff --git a/api-ref/source/samples/bay-get-all-resp.json b/api-ref/source/samples/bay-get-all-resp.json deleted file mode 100644 index 9e970c4b..00000000 --- a/api-ref/source/samples/bay-get-all-resp.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "bays":[ - { - "status":"CREATE_COMPLETE", - "uuid":"746e779a-751a-456b-a3e9-c883d734946f", - "links":[ - { - "href":"http://10.164.180.104:9511/v1/bays/746e779a-751a-456b-a3e9-c883d734946f", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/bays/746e779a-751a-456b-a3e9-c883d734946f", - "rel":"bookmark" - } - ], - "stack_id":"9c6f1169-7300-4d08-a444-d2be38758719", - "master_count":1, - "baymodel_id":"0562d357-8641-4759-8fed-8173f02c9633", - "node_count":1, - "bay_create_timeout":60, - "name":"k8s" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/bay-get-one-resp.json b/api-ref/source/samples/bay-get-one-resp.json deleted file mode 100644 index 93cca3cb..00000000 --- a/api-ref/source/samples/bay-get-one-resp.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "status":"CREATE_COMPLETE", - "uuid":"746e779a-751a-456b-a3e9-c883d734946f", - "links":[ - { - "href":"http://10.164.180.104:9511/v1/bays/746e779a-751a-456b-a3e9-c883d734946f", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/bays/746e779a-751a-456b-a3e9-c883d734946f", - "rel":"bookmark" - } - ], - "stack_id":"9c6f1169-7300-4d08-a444-d2be38758719", - "created_at":"2016-08-29T06:51:31+00:00", - "api_address":"https://172.24.4.6:6443", - "discovery_url":"https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", - "updated_at":"2016-08-29T06:53:24+00:00", - "master_count":1, - "coe_version": "v1.2.0", - "baymodel_id":"0562d357-8641-4759-8fed-8173f02c9633", - "master_addresses":[ - "172.24.4.6" - ], - "node_count":1, - "node_addresses":[ - "172.24.4.13" - ], - "status_reason":"Stack CREATE completed successfully", - "bay_create_timeout":60, - "name":"k8s" -} \ No newline at end of file diff --git a/api-ref/source/samples/bay-update-req.json b/api-ref/source/samples/bay-update-req.json deleted file mode 100644 index c01e5edd..00000000 --- a/api-ref/source/samples/bay-update-req.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "path":"/node_count", - "value":2, - "op":"replace" - } -] \ No newline at 
end of file diff --git a/api-ref/source/samples/baymodel-create-req.json b/api-ref/source/samples/baymodel-create-req.json deleted file mode 100644 index 38bb40c5..00000000 --- a/api-ref/source/samples/baymodel-create-req.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "labels":{ - - }, - "fixed_subnet":null, - "master_flavor_id":null, - "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", - "https_proxy":"http://10.164.177.169:8080", - "tls_disabled":false, - "keypair_id":"kp", - "public":false, - "http_proxy":"http://10.164.177.169:8080", - "docker_volume_size":3, - "server_type":"vm", - "external_network_id":"public", - "image_id":"fedora-atomic-latest", - "volume_driver":"cinder", - "registry_enabled":false, - "docker_storage_driver":"devicemapper", - "name":"k8s-bm2", - "network_driver":"flannel", - "fixed_network":null, - "coe":"kubernetes", - "flavor_id":"m1.small", - "master_lb_enabled":true, - "dns_nameserver":"8.8.8.8" -} \ No newline at end of file diff --git a/api-ref/source/samples/baymodel-create-resp.json b/api-ref/source/samples/baymodel-create-resp.json deleted file mode 100644 index 7b1f4f41..00000000 --- a/api-ref/source/samples/baymodel-create-resp.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "insecure_registry":null, - "links":[ - { - "href":"http://10.164.180.104:9511/v1/baymodels/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/baymodels/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", - "rel":"bookmark" - } - ], - "http_proxy":"http://10.164.177.169:8080", - "updated_at":null, - "floating_ip_enabled":true, - "fixed_subnet":null, - "master_flavor_id":null, - "uuid":"085e1c4d-4f68-4bfd-8462-74b9e14e4f39", - "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", - "https_proxy":"http://10.164.177.169:8080", - "tls_disabled":false, - "keypair_id":"kp", - "public":false, - "labels":{ - - }, - "docker_volume_size":3, - "server_type":"vm", - "external_network_id":"public", - "cluster_distro":"fedora-atomic", - "image_id":"fedora-atomic-latest", - "volume_driver":"cinder", - "registry_enabled":false, - "docker_storage_driver":"devicemapper", - "apiserver_port":null, - "name":"k8s-bm2", - "created_at":"2016-08-29T02:08:08+00:00", - "network_driver":"flannel", - "fixed_network":null, - "coe":"kubernetes", - "flavor_id":"m1.small", - "master_lb_enabled":true, - "dns_nameserver":"8.8.8.8" -} \ No newline at end of file diff --git a/api-ref/source/samples/baymodel-get-all-resp.json b/api-ref/source/samples/baymodel-get-all-resp.json deleted file mode 100644 index e7c7d6c2..00000000 --- a/api-ref/source/samples/baymodel-get-all-resp.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "baymodels":[ - { - "insecure_registry":null, - "links":[ - { - "href":"http://10.164.180.104:9511/v1/baymodels/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/baymodels/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", - "rel":"bookmark" - } - ], - "http_proxy":"http://10.164.177.169:8080", - "updated_at":null, - "floating_ip_enabled":true, - "fixed_subnet":null, - "master_flavor_id":null, - "uuid":"085e1c4d-4f68-4bfd-8462-74b9e14e4f39", - "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", - "https_proxy":"http://10.164.177.169:8080", - "tls_disabled":false, - "keypair_id":"kp", - "public":false, - "labels":{ - - }, - "docker_volume_size":3, - "server_type":"vm", - "external_network_id":"public", - "cluster_distro":"fedora-atomic", - "image_id":"fedora-atomic-latest", - "volume_driver":"cinder", - "registry_enabled":false, 
- "docker_storage_driver":"devicemapper", - "apiserver_port":null, - "name":"k8s-bm2", - "created_at":"2016-08-29T02:08:08+00:00", - "network_driver":"flannel", - "fixed_network":null, - "coe":"kubernetes", - "flavor_id":"m1.small", - "master_lb_enabled":true, - "dns_nameserver":"8.8.8.8" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/baymodel-update-req.json b/api-ref/source/samples/baymodel-update-req.json deleted file mode 100644 index 9c604dc3..00000000 --- a/api-ref/source/samples/baymodel-update-req.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "path":"/master_lb_enabled", - "value":"True", - "op":"replace" - }, - { - "path":"/registry_enabled", - "value":"True", - "op":"replace" - } -] \ No newline at end of file diff --git a/api-ref/source/samples/certificates-ca-show-resp.json b/api-ref/source/samples/certificates-ca-show-resp.json deleted file mode 100644 index d53909b2..00000000 --- a/api-ref/source/samples/certificates-ca-show-resp.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "cluster_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4" - "pem":"-----BEGIN CERTIFICATE-----\nMIICzDCCAbSgAwIBAgIQOOkVcEN7TNa9E80GoUs4xDANBgkqhkiG9w0BAQsFADAO\n-----END CERTIFICATE-----\n", - "bay_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", - "links":[ - { - "href":"http://10.164.180.104:9511/v1/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", - "rel":"bookmark" - } - ] -} diff --git a/api-ref/source/samples/certificates-ca-sign-req.json b/api-ref/source/samples/certificates-ca-sign-req.json deleted file mode 100644 index b2ff96c2..00000000 --- a/api-ref/source/samples/certificates-ca-sign-req.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "bay_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", - "csr":"-----BEGIN CERTIFICATE REQUEST-----\nMIIEfzCCAmcCAQAwFDESMBAGA1UEAxMJWW91ciBOYW1lMIICIjANBgkqhkiG9w0B\n-----END CERTIFICATE REQUEST-----\n" -} \ No newline at end of file diff --git a/api-ref/source/samples/certificates-ca-sign-resp.json b/api-ref/source/samples/certificates-ca-sign-resp.json deleted file mode 100644 index 9858da7e..00000000 --- a/api-ref/source/samples/certificates-ca-sign-resp.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "pem":"-----BEGIN CERTIFICATE-----\nMIIDxDCCAqygAwIBAgIRALgUbIjdKUy8lqErJmCxVfkwDQYJKoZIhvcNAQELBQAw\n-----END CERTIFICATE-----\n", - "bay_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", - "links":[ - { - "href":"http://10.164.180.104:9511/v1/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", - "rel":"bookmark" - } - ], - "csr":"-----BEGIN CERTIFICATE REQUEST-----\nMIIEfzCCAmcCAQAwFDESMBAGA1UEAxMJWW91ciBOYW1lMIICIjANBgkqhkiG9w0B\n-----END CERTIFICATE REQUEST-----\n" -} \ No newline at end of file diff --git a/api-ref/source/samples/cluster-create-req.json b/api-ref/source/samples/cluster-create-req.json deleted file mode 100644 index fa1f36dd..00000000 --- a/api-ref/source/samples/cluster-create-req.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "name":"k8s", - "discovery_url":null, - "master_count":2, - "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", - "node_count":2, - "create_timeout":60, - "keypair":"my_keypair" -} diff --git a/api-ref/source/samples/cluster-create-resp.json b/api-ref/source/samples/cluster-create-resp.json deleted file mode 100644 index 99d440d9..00000000 --- a/api-ref/source/samples/cluster-create-resp.json +++ 
/dev/null @@ -1,3 +0,0 @@ -{ - "uuid":"746e779a-751a-456b-a3e9-c883d734946f" -} \ No newline at end of file diff --git a/api-ref/source/samples/cluster-get-all-resp.json b/api-ref/source/samples/cluster-get-all-resp.json deleted file mode 100644 index 5c181fa8..00000000 --- a/api-ref/source/samples/cluster-get-all-resp.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "clusters":[ - { - "status":"CREATE_IN_PROGRESS", - "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", - "uuid":"731387cf-a92b-4c36-981e-3271d63e5597", - "links":[ - { - "href":"http://10.164.180.104:9511/v1/bays/731387cf-a92b-4c36-981e-3271d63e5597", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/bays/731387cf-a92b-4c36-981e-3271d63e5597", - "rel":"bookmark" - } - ], - "stack_id":"31c1ee6c-081e-4f39-9f0f-f1d87a7defa1", - "keypair":"my_keypair", - "master_count":1, - "create_timeout":60, - "node_count":1, - "name":"k8s" - } - ] -} diff --git a/api-ref/source/samples/cluster-get-one-resp.json b/api-ref/source/samples/cluster-get-one-resp.json deleted file mode 100644 index 5cc7e7dc..00000000 --- a/api-ref/source/samples/cluster-get-one-resp.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "status":"CREATE_COMPLETE", - "uuid":"746e779a-751a-456b-a3e9-c883d734946f", - "links":[ - { - "href":"http://10.164.180.104:9511/v1/clusters/746e779a-751a-456b-a3e9-c883d734946f", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/clusters/746e779a-751a-456b-a3e9-c883d734946f", - "rel":"bookmark" - } - ], - "stack_id":"9c6f1169-7300-4d08-a444-d2be38758719", - "created_at":"2016-08-29T06:51:31+00:00", - "api_address":"https://172.24.4.6:6443", - "discovery_url":"https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", - "updated_at":"2016-08-29T06:53:24+00:00", - "master_count":1, - "coe_version": "v1.2.0", - "keypair":"my_keypair", - "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", - "master_addresses":[ - "172.24.4.6" - ], - "node_count":1, - "node_addresses":[ - "172.24.4.13" - ], - "status_reason":"Stack CREATE completed successfully", - "create_timeout":60, - "name":"k8s" -} diff --git a/api-ref/source/samples/cluster-update-req.json b/api-ref/source/samples/cluster-update-req.json deleted file mode 100644 index c01e5edd..00000000 --- a/api-ref/source/samples/cluster-update-req.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "path":"/node_count", - "value":2, - "op":"replace" - } -] \ No newline at end of file diff --git a/api-ref/source/samples/clustertemplate-create-req.json b/api-ref/source/samples/clustertemplate-create-req.json deleted file mode 100644 index 38bb40c5..00000000 --- a/api-ref/source/samples/clustertemplate-create-req.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "labels":{ - - }, - "fixed_subnet":null, - "master_flavor_id":null, - "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", - "https_proxy":"http://10.164.177.169:8080", - "tls_disabled":false, - "keypair_id":"kp", - "public":false, - "http_proxy":"http://10.164.177.169:8080", - "docker_volume_size":3, - "server_type":"vm", - "external_network_id":"public", - "image_id":"fedora-atomic-latest", - "volume_driver":"cinder", - "registry_enabled":false, - "docker_storage_driver":"devicemapper", - "name":"k8s-bm2", - "network_driver":"flannel", - "fixed_network":null, - "coe":"kubernetes", - "flavor_id":"m1.small", - "master_lb_enabled":true, - "dns_nameserver":"8.8.8.8" -} \ No newline at end of file diff --git a/api-ref/source/samples/clustertemplate-create-resp.json b/api-ref/source/samples/clustertemplate-create-resp.json deleted
file mode 100644 index 7f6f116e..00000000 --- a/api-ref/source/samples/clustertemplate-create-resp.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "insecure_registry":null, - "links":[ - { - "href":"http://10.164.180.104:9511/v1/clustertemplates/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/clustertemplates/085e1c4d-4f68-4bfd-8462-74b9e14e4f39", - "rel":"bookmark" - } - ], - "http_proxy":"http://10.164.177.169:8080", - "updated_at":null, - "floating_ip_enabled":true, - "fixed_subnet":null, - "master_flavor_id":null, - "uuid":"085e1c4d-4f68-4bfd-8462-74b9e14e4f39", - "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", - "https_proxy":"http://10.164.177.169:8080", - "tls_disabled":false, - "keypair_id":"kp", - "public":false, - "labels":{ - - }, - "docker_volume_size":3, - "server_type":"vm", - "external_network_id":"public", - "cluster_distro":"fedora-atomic", - "image_id":"fedora-atomic-latest", - "volume_driver":"cinder", - "registry_enabled":false, - "docker_storage_driver":"devicemapper", - "apiserver_port":null, - "name":"k8s-bm2", - "created_at":"2016-08-29T02:08:08+00:00", - "network_driver":"flannel", - "fixed_network":null, - "coe":"kubernetes", - "flavor_id":"m1.small", - "master_lb_enabled":true, - "dns_nameserver":"8.8.8.8" -} \ No newline at end of file diff --git a/api-ref/source/samples/clustertemplate-get-all-resp.json b/api-ref/source/samples/clustertemplate-get-all-resp.json deleted file mode 100644 index 24099500..00000000 --- a/api-ref/source/samples/clustertemplate-get-all-resp.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "clustertemplates":[ - { - "insecure_registry":null, - "links":[ - { - "href":"http://10.164.180.104:9511/v1/clustertemplates/0562d357-8641-4759-8fed-8173f02c9633", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/clustertemplates/0562d357-8641-4759-8fed-8173f02c9633", - "rel":"bookmark" - } - ], - "http_proxy":"http://10.164.177.169:8080", - "updated_at":null, - "floating_ip_enabled":true, - "fixed_subnet":null, - "master_flavor_id":null, - "uuid":"0562d357-8641-4759-8fed-8173f02c9633", - "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", - "https_proxy":"http://10.164.177.169:8080", - "tls_disabled":false, - "keypair_id":"kp", - "public":false, - "labels":{ - - }, - "docker_volume_size":3, - "server_type":"vm", - "external_network_id":"public", - "cluster_distro":"fedora-atomic", - "image_id":"fedora-atomic-latest", - "volume_driver":"cinder", - "registry_enabled":false, - "docker_storage_driver":"devicemapper", - "apiserver_port":null, - "name":"k8s-bm", - "created_at":"2016-08-26T09:34:41+00:00", - "network_driver":"flannel", - "fixed_network":null, - "coe":"kubernetes", - "flavor_id":"m1.small", - "master_lb_enabled":false, - "dns_nameserver":"8.8.8.8" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/clustertemplate-update-req.json b/api-ref/source/samples/clustertemplate-update-req.json deleted file mode 100644 index 9c604dc3..00000000 --- a/api-ref/source/samples/clustertemplate-update-req.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "path":"/master_lb_enabled", - "value":"True", - "op":"replace" - }, - { - "path":"/registry_enabled", - "value":"True", - "op":"replace" - } -] \ No newline at end of file diff --git a/api-ref/source/samples/mservice-get-resp.json b/api-ref/source/samples/mservice-get-resp.json deleted file mode 100644 index e5efc814..00000000 --- a/api-ref/source/samples/mservice-get-resp.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - 
"mservices":[ - { - "binary":"magnum-conductor", - "created_at":"2016-08-23T10:52:13+00:00", - "state":"up", - "report_count":2179, - "updated_at":"2016-08-25T01:13:16+00:00", - "host":"magnum-manager", - "disabled_reason":null, - "id":1 - } - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/quota-create-req.json b/api-ref/source/samples/quota-create-req.json deleted file mode 100644 index c5f941c9..00000000 --- a/api-ref/source/samples/quota-create-req.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", - "resource": "Cluster", - "hard_limit": 10 -} \ No newline at end of file diff --git a/api-ref/source/samples/quota-create-resp.json b/api-ref/source/samples/quota-create-resp.json deleted file mode 100644 index 05499eed..00000000 --- a/api-ref/source/samples/quota-create-resp.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "resource": "Cluster", - "created_at": "2017-01-17T17:35:48+00:00", - "updated_at": null, - "hard_limit": 1, - "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", - "id": 26 -} \ No newline at end of file diff --git a/api-ref/source/samples/quota-delete-req.json b/api-ref/source/samples/quota-delete-req.json deleted file mode 100755 index e16a93bf..00000000 --- a/api-ref/source/samples/quota-delete-req.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", - "resource": "Cluster" -} \ No newline at end of file diff --git a/api-ref/source/samples/quota-get-all-resp.json b/api-ref/source/samples/quota-get-all-resp.json deleted file mode 100644 index c9321fba..00000000 --- a/api-ref/source/samples/quota-get-all-resp.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "quotas": [ - { - "resource": "Cluster", - "created_at": "2017-01-17T17:35:49+00:00", - "updated_at": "2017-01-17T17:38:21+00:00", - "hard_limit": 10, - "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", - "id": 26 - } - ] -} \ No newline at end of file diff --git a/api-ref/source/samples/quota-get-one-resp.json b/api-ref/source/samples/quota-get-one-resp.json deleted file mode 100644 index 0cb18cd8..00000000 --- a/api-ref/source/samples/quota-get-one-resp.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "resource": "Cluster", - "created_at": "2017-01-17T17:35:49+00:00", - "updated_at": "2017-01-17T17:38:20+00:00", - "hard_limit": 10, - "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", - "id": 26 -} \ No newline at end of file diff --git a/api-ref/source/samples/quota-update-req.json b/api-ref/source/samples/quota-update-req.json deleted file mode 100755 index c5f941c9..00000000 --- a/api-ref/source/samples/quota-update-req.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", - "resource": "Cluster", - "hard_limit": 10 -} \ No newline at end of file diff --git a/api-ref/source/samples/quota-update-resp.json b/api-ref/source/samples/quota-update-resp.json deleted file mode 100644 index 0cb18cd8..00000000 --- a/api-ref/source/samples/quota-update-resp.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "resource": "Cluster", - "created_at": "2017-01-17T17:35:49+00:00", - "updated_at": "2017-01-17T17:38:20+00:00", - "hard_limit": 10, - "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", - "id": 26 -} \ No newline at end of file diff --git a/api-ref/source/samples/stats-get-resp.json b/api-ref/source/samples/stats-get-resp.json deleted file mode 100644 index 3fe26ddc..00000000 --- a/api-ref/source/samples/stats-get-resp.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "clusters": 1, - "nodes": 2 -} diff --git 
a/api-ref/source/samples/versions-01-get-resp.json b/api-ref/source/samples/versions-01-get-resp.json deleted file mode 100644 index 299d001e..00000000 --- a/api-ref/source/samples/versions-01-get-resp.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "media_types":[ - { - "base":"application/json", - "type":"application/vnd.openstack.magnum.v1+json" - } - ], - "links":[ - { - "href":"http://10.164.180.104:9511/v1/", - "rel":"self" - }, - { - "href":"http://docs.openstack.org/developer/magnum/dev/api-spec-v1.html", - "type":"text/html", - "rel":"describedby" - } - ], - "mservices":[ - { - "href":"http://10.164.180.104:9511/v1/mservices/", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/mservices/", - "rel":"bookmark" - } - ], - "bays":[ - { - "href":"http://10.164.180.104:9511/v1/bays/", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/bays/", - "rel":"bookmark" - } - ], - "clustertemplates":[ - { - "href":"http://10.164.180.104:9511/v1/clustertemplates/", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/clustertemplates/", - "rel":"bookmark" - } - ], - "certificates":[ - { - "href":"http://10.164.180.104:9511/v1/certificates/", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/certificates/", - "rel":"bookmark" - } - ], - "clusters":[ - { - "href":"http://10.164.180.104:9511/v1/clusters/", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/clusters/", - "rel":"bookmark" - } - ], - "baymodels":[ - { - "href":"http://10.164.180.104:9511/v1/baymodels/", - "rel":"self" - }, - { - "href":"http://10.164.180.104:9511/baymodels/", - "rel":"bookmark" - } - ], - "id":"v1" -} \ No newline at end of file diff --git a/api-ref/source/samples/versions-get-resp.json b/api-ref/source/samples/versions-get-resp.json deleted file mode 100644 index 319d9bd8..00000000 --- a/api-ref/source/samples/versions-get-resp.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "versions":[ - { - "status":"CURRENT", - "min_version":"1.1", - "max_version":"1.4", - "id":"v1", - "links":[ - { - "href":"http://10.164.180.104:9511/v1/", - "rel":"self" - } - ] - } - ], - "name":"OpenStack Magnum API", - "description":"Magnum is an OpenStack project which aims to provide container management." -} \ No newline at end of file diff --git a/api-ref/source/stats.inc b/api-ref/source/stats.inc deleted file mode 100644 index d6000604..00000000 --- a/api-ref/source/stats.inc +++ /dev/null @@ -1,82 +0,0 @@ -.. -*- rst -*- - -================= - Magnum Stats API -================= - -An admin user can get stats for the given tenant and also overall system stats. -A non-admin user can get self stats. - -Show stats for a tenant -======================= - -.. rest_method:: GET /v1/stats?project_id= - -Get stats based on project id. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id - -Response --------- - -.. rest_parameters:: parameters.yaml - - - clusters: clusters - - nodes: nodes - -Response Example ----------------- - -.. literalinclude:: samples/stats-get-resp.json - :language: javascript - -Show overall stats -================== - -.. rest_method:: GET /v1/stats - -Show overall Magnum system stats. -If the requester is non-admin user show self stats. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. 
rest_status_code:: error status.yaml - - - 401 - - 403 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - clusters: clusters - - nodes: nodes - -Response Example ---------------- - -.. literalinclude:: samples/stats-get-resp.json - :language: javascript diff --git a/api-ref/source/status.yaml b/api-ref/source/status.yaml deleted file mode 100644 index b4d27fed..00000000 --- a/api-ref/source/status.yaml +++ /dev/null @@ -1,62 +0,0 @@ -################# -# Success Codes # -################# -200: - default: | - Request was successful. -201: - default: | - Resource was created and is ready to use. -202: - default: | - Request was accepted for processing, but the processing has not been - completed. A 'location' header is included in the response which contains - a link to check the progress of the request. -204: - default: | - The server has fulfilled the request by deleting the resource. -300: - default: | - There are multiple choices for resources. The request has to be more - specific to successfully retrieve one of these resources. -302: - default: | - The response is about a redirection hint. The header of the response - usually contains a 'location' value where requesters can check to track - the real location of the resource. - -################# -# Error Codes # -################# - -400: - default: | - Some content in the request was invalid. - resource_signal: | - The target resource doesn't support receiving a signal. -401: - default: | - User must authenticate before making a request. -403: - default: | - Policy does not allow the current user to do this operation. -404: - default: | - The requested resource could not be found. -405: - default: | - Method is not valid for this endpoint. -409: - default: | - This operation conflicted with another operation on this resource. - duplicate_zone: | - There is already a zone with this name. -500: - default: | - Something went wrong inside the service. This usually should not happen; - if it does, it means the server has experienced some serious - problems. -503: - default: | - Service is not available. This is mostly caused by service configuration - errors which prevent the service from starting up successfully. diff --git a/api-ref/source/urls.inc b/api-ref/source/urls.inc deleted file mode 100644 index b8c37ac2..00000000 --- a/api-ref/source/urls.inc +++ /dev/null @@ -1,31 +0,0 @@ -.. -*- rst -*- - -================= - Magnum Base URLs -================= - -All API calls through the rest of this document require authentication -with the OpenStack Identity service. They also require a ``url`` that -is extracted from the Identity token for the service of type -``container-infra``. This is the root URL to which every call below is -appended to build the full path. - -Note that if using OpenStack Identity service API v2, ``url`` can be -represented via ``adminURL``, ``internalURL`` or ``publicURL`` in the endpoint -catalog. In Identity service API v3, ``url`` is represented by the field -``interface``, whose values include ``admin``, ``internal`` and ``public``. - -For instance, if the ``url`` is -``http://my-container-infra.org/magnum/v1`` then the full API call for -``/clusters`` is ``http://my-container-infra.org/magnum/v1/clusters``. - -Depending on the deployment, the container infrastructure management service -URL might use http or https, a custom port, or a custom path, and may include -your project ID. The only way to know the URLs for your deployment is by using the -service catalog.
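As a hedged sketch of catalog-based discovery with ``keystoneauth1`` (every credential value below is a placeholder for a hypothetical deployment, not part of this reference)::

    from keystoneauth1.identity import v3
    from keystoneauth1 import session

    # Placeholder credentials; substitute your deployment's values.
    auth = v3.Password(
        auth_url="http://controller:5000/v3",
        username="demo",
        password="secret",
        project_name="demo",
        user_domain_id="default",
        project_domain_id="default",
    )
    sess = session.Session(auth=auth)

    # Ask the service catalog for the container-infra endpoint instead of
    # hard-coding it.
    magnum_url = sess.get_endpoint(
        service_type="container-infra", interface="public")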
The container infrastructure management URL should never be -hard-coded in applications, even if they are only expected to work at a -single site. It should always be discovered from the Identity token. - -As such, for the rest of this document we will be using shorthand -where ``GET /clusters`` really means -``GET {your_container_infra_url}/clusters``. diff --git a/api-ref/source/versions.inc b/api-ref/source/versions.inc deleted file mode 100644 index 96286a7f..00000000 --- a/api-ref/source/versions.inc +++ /dev/null @@ -1,104 +0,0 @@ -.. -*- rst -*- - -============== - API Versions -============== - -In order to bring new features to users over time, the Magnum API -supports versioning. There are two kinds of versions in Magnum. - -- *major versions*, which have dedicated urls -- *microversions*, which can be requested through the use of the - ``OpenStack-API-Version`` header. - -Beginning with the Newton release, all API requests support the -``OpenStack-API-Version`` header. This header SHOULD be supplied -with every request; in the absence of this header, each request is treated -as though coming from an older pre-Newton client. This was done to preserve -backwards compatibility as we introduced new features. - -The Version APIs work differently from other APIs as they *do not* -require authentication. - -List API Versions ======================= - -.. rest_method:: GET / - -This fetches all the information about all known major API versions in -the deployment. Links to more specific information will be provided -for each API version, as well as information about supported min and -max microversions. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 503 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - versions: version - - status: version_status - - min_version: version_min - - max_version: version_max - - id: version_id - - links: links - - name: name - - description: description - -Response Example ---------------- - -.. literalinclude:: samples/versions-get-resp.json - :language: javascript - - -Show v1 API Version ==================================== - -.. rest_method:: GET /v1/ - -Show all the resources within the Magnum v1 API. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 503 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - id: version_id - - links: links - -.. note:: - - The ``media-types`` parameters in the response are - vestigial and provide no useful information. They will probably be - deprecated and removed in the future. - - -Response Example ---------------- - -.. literalinclude:: samples/versions-01-get-resp.json - :language: javascript diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb7..00000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/contrib/drivers/dcos_centos_v1/README.md b/contrib/drivers/dcos_centos_v1/README.md deleted file mode 100644 index 35fccf25..00000000 --- a/contrib/drivers/dcos_centos_v1/README.md +++ /dev/null @@ -1,103 +0,0 @@ -How to build a CentOS image which contains DC/OS 1.8.x -====================================================== - -Here is the advanced DC/OS 1.8 installation guide.
- -See [Advanced DC/OS Installation Guide] -(https://dcos.io/docs/1.8/administration/installing/custom/advanced/) -See [Install Docker on CentOS] -(https://dcos.io/docs/1.8/administration/installing/custom/system-requirements/install-docker-centos/) -See [Adding agent nodes] -(https://dcos.io/docs/1.8/administration/installing/custom/add-a-node/) - -Create a CentOS image using DIB by following the steps outlined in the DC/OS installation guide. - -1. Install and configure docker in chroot. -2. Install system requirements in chroot. -3. Download `dcos_generate_config.sh` outside chroot. - This file will be used to run `dcos_generate_config.sh --genconf` to generate - config files on the node during magnum cluster creation. -4. Some configuration changes are required for DC/OS, i.e. disabling firewalld - and adding a group named nogroup. - See comments in the script file. - -Use the CentOS image to build a DC/OS cluster. -Command: - `magnum cluster-template-create` - `magnum cluster-create` - -After all the instances with the CentOS image are created: -1. Pass parameters to config.yaml with magnum cluster template properties. -2. Run `dcos_generate_config.sh --genconf` to generate config files. -3. Run `dcos_install.sh master` on the master node and `dcos_install.sh slave` on the slave node. - -To scale the DC/OS cluster, use: -Command: - `magnum cluster-update` - -This follows the same steps as cluster creation: -1. Create new instances, generate config files on them and install. -2. Or delete those agent nodes where containers are not running. - - -How to use magnum dcos coe -=============================================== - -We are assuming that magnum has been installed and the magnum path is `/opt/stack/magnum`. - -1. Copy the dcos magnum coe source code -$ cp -r /opt/stack/magnum/contrib/drivers/dcos_centos_v1 /opt/stack/magnum/magnum/drivers/ -$ cp /opt/stack/magnum/contrib/drivers/common/dcos_* /opt/stack/magnum/magnum/drivers/common/ -$ cd /opt/stack/magnum -$ sudo python setup.py install - -2. Add the driver in setup.cfg -dcos_centos_v1 = magnum.drivers.dcos_centos_v1.driver:Driver - -3. Restart your magnum services. - -4. Prepare a CentOS image with the dcos and docker elements installed - See how to build a CentOS image in /opt/stack/magnum/magnum/drivers/dcos_centos_v1/image/README.md - -5. Create the Glance image -$ openstack image create centos-7-dcos.qcow2 \ - --public \ - --disk-format=qcow2 \ - --container-format=bare \ - --property os_distro=centos \ - --file=centos-7-dcos.qcow2 - -6. Create the magnum cluster template - Configure the DC/OS cluster with --labels - See https://dcos.io/docs/1.8/administration/installing/custom/configuration-parameters/ -$ magnum cluster-template-create --name dcos-cluster-template \ - --image-id centos-7-dcos.qcow2 \ - --keypair-id testkey \ - --external-network-id public \ - --dns-nameserver 8.8.8.8 \ - --flavor-id m1.medium \ - --labels oauth_enabled=false \ - --coe dcos - - Here is an example that specifies the overlay network in DC/OS; - 'dcos_overlay_network' should be in JSON string format.
-$ magnum cluster-template-create --name dcos-cluster-template \ - --image-id centos-7-dcos.qcow2 \ - --keypair-id testkey \ - --external-network-id public \ - --dns-nameserver 8.8.8.8 \ - --flavor-id m1.medium \ - --labels oauth_enabled=false \ - --labels dcos_overlay_enable='true' \ - --labels dcos_overlay_config_attempts='6' \ - --labels dcos_overlay_mtu='9001' \ - --labels dcos_overlay_network='{"vtep_subnet": "44.128.0.0/20",\ - "vtep_mac_oui": "70:B3:D5:00:00:00","overlays":\ - [{"name": "dcos","subnet": "9.0.0.0/8","prefix": 26}]}' \ - --coe dcos - -7. Create the magnum cluster -$ magnum cluster-create --name dcos-cluster --cluster-template dcos-cluster-template --node-count 1 - -8. You need to wait for a while after the magnum cluster creation completes - for the DC/OS web interface to become accessible. diff --git a/contrib/drivers/dcos_centos_v1/__init__.py b/contrib/drivers/dcos_centos_v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/contrib/drivers/dcos_centos_v1/driver.py b/contrib/drivers/dcos_centos_v1/driver.py deleted file mode 100644 index 51e445ab..00000000 --- a/contrib/drivers/dcos_centos_v1/driver.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.drivers.dcos_centos_v1 import monitor -from magnum.drivers.dcos_centos_v1.scale_manager import DcosScaleManager -from magnum.drivers.dcos_centos_v1 import template_def -from magnum.drivers.heat import driver - - -class Driver(driver.HeatDriver): - - @property - def provides(self): - return [ - {'server_type': 'vm', - 'os': 'centos', - 'coe': 'dcos'}, - ] - - def get_template_definition(self): - return template_def.DcosCentosVMTemplateDefinition() - - def get_monitor(self, context, cluster): - return monitor.DcosMonitor(context, cluster) - - def get_scale_manager(self, context, osclient, cluster): - return DcosScaleManager(context, osclient, cluster) diff --git a/contrib/drivers/dcos_centos_v1/image/README.md b/contrib/drivers/dcos_centos_v1/image/README.md deleted file mode 100644 index c95514a5..00000000 --- a/contrib/drivers/dcos_centos_v1/image/README.md +++ /dev/null @@ -1,86 +0,0 @@ -============= -centos-dcos -============= - -This directory contains [diskimage-builder](https://github.com/openstack/diskimage-builder) -elements to build a CentOS image which contains DC/OS. - - -Pre-requisites to run diskimage-builder ---------------------------------------- - -For diskimage-builder to work, the following packages need to be -present: - -* kpartx -* qemu-utils -* curl -* xfsprogs -* yum -* yum-utils -* git - -For Debian/Ubuntu systems, use:: - - apt-get install kpartx qemu-utils curl xfsprogs yum yum-utils git - -For CentOS and Fedora < 22, use:: - - yum install kpartx qemu-utils curl xfsprogs yum yum-utils git - -For Fedora >= 22, use:: - - dnf install kpartx @virtualization curl xfsprogs yum yum-utils git - - -How to generate a CentOS image with DC/OS 1.8.x ---------------------------------------------- - -1.
Download and export the element path - - git clone https://git.openstack.org/openstack/magnum - git clone https://git.openstack.org/openstack/diskimage-builder.git - git clone https://git.openstack.org/openstack/dib-utils.git - git clone https://git.openstack.org/openstack/tripleo-image-elements.git - git clone https://git.openstack.org/openstack/heat-templates.git - - export PATH="${PWD}/diskimage-builder/bin:$PATH" - export PATH="${PWD}/dib-utils/bin:$PATH" - export ELEMENTS_PATH=magnum/contrib/drivers/dcos_centos_v1/image - export ELEMENTS_PATH=${ELEMENTS_PATH}:diskimage-builder/elements - export ELEMENTS_PATH=${ELEMENTS_PATH}:tripleo-image-elements/elements:heat-templates/hot/software-config/elements - -2. Export the environment variable with the URL used to download dcos_generate_config.sh - The default download URL is for DC/OS 1.8.4 - - export DCOS_GENERATE_CONFIG_SRC=https://downloads.dcos.io/dcos/stable/commit/e64024af95b62c632c90b9063ed06296fcf38ea5/dcos_generate_config.sh - - Or specify a local file path - - export DCOS_GENERATE_CONFIG_SRC=`pwd`/dcos_generate_config.sh - -3. Set the file system type to `xfs` - Only XFS is currently supported for overlay. - See https://dcos.io/docs/1.8/administration/installing/custom/system-requirements/install-docker-centos/#recommendations - - export FS_TYPE=xfs - -4. Create the image - - disk-image-create \ - centos7 vm docker dcos selinux-permissive \ - os-collect-config os-refresh-config os-apply-config \ - heat-config heat-config-script \ - -o centos-7-dcos.qcow2 - -5. (Optional) Create a user image for bare metal nodes, - built with the dhcp-all-interfaces and devuser elements - - export DIB_DEV_USER_USERNAME=centos - export DIB_DEV_USER_PWDLESS_SUDO=YES - - disk-image-create \ - centos7 vm docker dcos selinux-permissive dhcp-all-interfaces devuser \ - os-collect-config os-refresh-config os-apply-config \ - heat-config heat-config-script \ - -o centos-7-dcos-bm.qcow2 diff --git a/contrib/drivers/dcos_centos_v1/image/dcos/elements-deps b/contrib/drivers/dcos_centos_v1/image/dcos/elements-deps deleted file mode 100644 index db54096a..00000000 --- a/contrib/drivers/dcos_centos_v1/image/dcos/elements-deps +++ /dev/null @@ -1,2 +0,0 @@ -package-installs -docker diff --git a/contrib/drivers/dcos_centos_v1/image/dcos/environment.d/10-dcos-install-url b/contrib/drivers/dcos_centos_v1/image/dcos/environment.d/10-dcos-install-url deleted file mode 100755 index 164da6ed..00000000 --- a/contrib/drivers/dcos_centos_v1/image/dcos/environment.d/10-dcos-install-url +++ /dev/null @@ -1,5 +0,0 @@ -# Specify the download url; the default is DC/OS version 1.8.4 -export DCOS_GENERATE_CONFIG_SRC=${DCOS_GENERATE_CONFIG_SRC:-https://downloads.dcos.io/dcos/stable/commit/e64024af95b62c632c90b9063ed06296fcf38ea5/dcos_generate_config.sh} - -# or a local file path -# export DCOS_GENERATE_CONFIG_SRC=${DCOS_GENERATE_CONFIG_SRC:-${PWD}/dcos_generate_config.sh} diff --git a/contrib/drivers/dcos_centos_v1/image/dcos/extra-data.d/99-download-generate-config b/contrib/drivers/dcos_centos_v1/image/dcos/extra-data.d/99-download-generate-config deleted file mode 100755 index a87bd761..00000000 --- a/contrib/drivers/dcos_centos_v1/image/dcos/extra-data.d/99-download-generate-config +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then - set -x -fi -set -eu -set -o pipefail - -# This script file is used to download dcos_generate_config.sh outside the chroot. -# This is essential because dcos_generate_config.sh is more than -# 700M, so we should download it into the image in advance.
- -sudo mkdir -p $TMP_MOUNT_PATH/opt/dcos - -if [ -f $DCOS_GENERATE_CONFIG_SRC ]; then - # If $DCOS_GENERATE_CONFIG_SRC is a file path, copy the file - sudo cp $DCOS_GENERATE_CONFIG_SRC $TMP_MOUNT_PATH/opt/dcos -else - # If $DCOS_GENERATE_CONFIG_SRC is a url, download it - # Please make sure curl is installed on your host environment - cd $TMP_MOUNT_PATH/opt/dcos - sudo -E curl -O $DCOS_GENERATE_CONFIG_SRC -fi diff --git a/contrib/drivers/dcos_centos_v1/image/dcos/package-installs.yaml b/contrib/drivers/dcos_centos_v1/image/dcos/package-installs.yaml deleted file mode 100644 index 038180e9..00000000 --- a/contrib/drivers/dcos_centos_v1/image/dcos/package-installs.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tar: -xz: -unzip: -curl: -ipset: -ntp: diff --git a/contrib/drivers/dcos_centos_v1/image/dcos/post-install.d/99-add-norgoup b/contrib/drivers/dcos_centos_v1/image/dcos/post-install.d/99-add-norgoup deleted file mode 100755 index 6c2b61d7..00000000 --- a/contrib/drivers/dcos_centos_v1/image/dcos/post-install.d/99-add-norgoup +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then - set -x -fi -set -eu -set -o pipefail - -# nogroup will be used on Mesos masters and agents. -sudo groupadd nogroup diff --git a/contrib/drivers/dcos_centos_v1/image/dcos/post-install.d/99-enable-ntp b/contrib/drivers/dcos_centos_v1/image/dcos/post-install.d/99-enable-ntp deleted file mode 100644 index fc22a429..00000000 --- a/contrib/drivers/dcos_centos_v1/image/dcos/post-install.d/99-enable-ntp +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then - set -x -fi -set -eu -set -o pipefail - -sudo systemctl enable ntpd diff --git a/contrib/drivers/dcos_centos_v1/image/docker/elements-deps b/contrib/drivers/dcos_centos_v1/image/docker/elements-deps deleted file mode 100644 index 7076aba9..00000000 --- a/contrib/drivers/dcos_centos_v1/image/docker/elements-deps +++ /dev/null @@ -1 +0,0 @@ -package-installs diff --git a/contrib/drivers/dcos_centos_v1/image/docker/install.d/50-install-docker b/contrib/drivers/dcos_centos_v1/image/docker/install.d/50-install-docker deleted file mode 100755 index 3315d640..00000000 --- a/contrib/drivers/dcos_centos_v1/image/docker/install.d/50-install-docker +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then - set -x -fi -set -eu -set -o pipefail - -# Install the Docker engine, daemon, and service. -# -# The supported versions of Docker are: -# 1.7.x -# 1.8.x -# 1.9.x -# 1.10.x -# 1.11.x -# Docker 1.12.x is NOT supported. -# Docker 1.9.x - 1.11.x is recommended for stability reasons. 
-# https://github.com/docker/docker/issues/9718
-#
-# See the DC/OS installation guide for details
-# https://dcos.io/docs/1.8/administration/installing/custom/system-requirements/install-docker-centos/
-#
-sudo -E yum install -y docker-engine-1.11.2
diff --git a/contrib/drivers/dcos_centos_v1/image/docker/post-install.d/60-enable-docker-service b/contrib/drivers/dcos_centos_v1/image/docker/post-install.d/60-enable-docker-service
deleted file mode 100755
index c4fd9441..00000000
--- a/contrib/drivers/dcos_centos_v1/image/docker/post-install.d/60-enable-docker-service
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
-    set -x
-fi
-set -eu
-set -o pipefail
-
-sudo systemctl enable docker
diff --git a/contrib/drivers/dcos_centos_v1/image/docker/pre-install.d/10-enable-overlay b/contrib/drivers/dcos_centos_v1/image/docker/pre-install.d/10-enable-overlay
deleted file mode 100755
index b19a5838..00000000
--- a/contrib/drivers/dcos_centos_v1/image/docker/pre-install.d/10-enable-overlay
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
-    set -x
-fi
-set -eu
-set -o pipefail
-
-# Upgrade CentOS to 7.2
-sudo -E yum upgrade --assumeyes --tolerant
-sudo -E yum update --assumeyes
-
-# Verify that the kernel is at least 3.10
-function version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; }
-
-kernel_version=`uname -r | cut --bytes=1-4`
-expect_version=3.10
-if version_gt $expect_version $kernel_version; then
-    echo "Error: kernel version must be at least $expect_version; current version is $kernel_version"
-    exit 1
-fi
-
-# Enable OverlayFS
-sudo tee /etc/modules-load.d/overlay.conf <<-'EOF'
-overlay
-EOF
diff --git a/contrib/drivers/dcos_centos_v1/image/docker/pre-install.d/20-configure-docker-service b/contrib/drivers/dcos_centos_v1/image/docker/pre-install.d/20-configure-docker-service
deleted file mode 100755
index af2a8adc..00000000
--- a/contrib/drivers/dcos_centos_v1/image/docker/pre-install.d/20-configure-docker-service
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
-    set -x
-fi
-set -eu
-set -o pipefail
-
-# Configure yum to use the Docker yum repo
-sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'
-[dockerrepo]
-name=Docker Repository
-baseurl=https://yum.dockerproject.org/repo/main/centos/7/
-enabled=1
-gpgcheck=1
-gpgkey=https://yum.dockerproject.org/gpg
-EOF
-
-# Configure systemd to run the Docker daemon with OverlayFS.
-# Manage Docker on CentOS with systemd:
-# systemd handles starting Docker on boot and restarting it when it crashes.
-#
-# Docker 1.11.x will be installed, so the known issues with Docker 1.12.x
-# on CentOS 7 do not apply.
-# https://github.com/docker/docker/issues/22847
-# https://github.com/docker/docker/issues/25098
-#
-sudo mkdir -p /etc/systemd/system/docker.service.d
-sudo tee /etc/systemd/system/docker.service.d/override.conf <<- 'EOF'
-[Service]
-ExecStart=
-ExecStart=/usr/bin/docker daemon --storage-driver=overlay -H fd://
-EOF
diff --git a/contrib/drivers/dcos_centos_v1/image/install_imagebuild_deps.sh b/contrib/drivers/dcos_centos_v1/image/install_imagebuild_deps.sh
deleted file mode 100755
index 4f1256af..00000000
--- a/contrib/drivers/dcos_centos_v1/image/install_imagebuild_deps.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# This script installs all dependencies needed to generate
-# images using diskimage-builder. Note that it has only been
-# tested on Ubuntu Xenial.
- -set -eux -set -o pipefail - -sudo apt update || true -sudo apt install -y \ - git \ - qemu-utils \ - python-dev \ - python-yaml \ - python-six \ - uuid-runtime \ - curl \ - sudo \ - kpartx \ - parted \ - wget \ - xfsprogs \ - yum \ - yum-utils diff --git a/contrib/drivers/dcos_centos_v1/image/validate_dcos_image.sh b/contrib/drivers/dcos_centos_v1/image/validate_dcos_image.sh deleted file mode 100755 index da14166e..00000000 --- a/contrib/drivers/dcos_centos_v1/image/validate_dcos_image.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2016 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -# check that image is valid -qemu-img check -q $1 - -# validate estimated size -FILESIZE=$(stat -c%s "$1") -MIN_SIZE=1231028224 # 1.15GB -MAX_SIZE=1335885824 # 1.25GB - -if [ $FILESIZE -lt $MIN_SIZE ] ; then - echo "Error: generated image size is lower than expected." - exit 1 -fi - -if [ $FILESIZE -gt $MAX_SIZE ] ; then - echo "Error: generated image size is higher than expected." - exit 1 -fi diff --git a/contrib/drivers/dcos_centos_v1/monitor.py b/contrib/drivers/dcos_centos_v1/monitor.py deleted file mode 100644 index fa845f6c..00000000 --- a/contrib/drivers/dcos_centos_v1/monitor.py +++ /dev/null @@ -1,74 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
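The size-window check in validate_dcos_image.sh above translates directly into Python. The byte thresholds are taken from the script; everything else is an illustrative sketch:

    import os
    import sys

    MIN_SIZE = 1231028224  # ~1.15 GB
    MAX_SIZE = 1335885824  # ~1.25 GB

    def validate_image_size(path):
        # Mirror the script's stat -c%s check: reject images whose size
        # falls outside the expected window.
        size = os.path.getsize(path)
        if size < MIN_SIZE:
            sys.exit('Error: generated image size is lower than expected.')
        if size > MAX_SIZE:
            sys.exit('Error: generated image size is higher than expected.')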
-
-from oslo_serialization import jsonutils
-
-from magnum.common import urlfetch
-from magnum.conductor import monitors
-
-
-class DcosMonitor(monitors.MonitorBase):
-
-    def __init__(self, context, cluster):
-        super(DcosMonitor, self).__init__(context, cluster)
-        self.data = {}
-
-    @property
-    def metrics_spec(self):
-        return {
-            'memory_util': {
-                'unit': '%',
-                'func': 'compute_memory_util',
-            },
-            'cpu_util': {
-                'unit': '%',
-                'func': 'compute_cpu_util',
-            },
-        }
-
-    # See https://github.com/dcos/adminrouter#ports-summary
-    # Use http://<master_ip>/mesos/ instead of http://<master_ip>:5050
-    def _build_url(self, url, protocol='http', server_name='mesos', path='/'):
-        return protocol + '://' + url + '/' + server_name + path
-
-    def _is_leader(self, state):
-        return state['leader'] == state['pid']
-
-    def pull_data(self):
-        self.data['mem_total'] = 0
-        self.data['mem_used'] = 0
-        self.data['cpu_total'] = 0
-        self.data['cpu_used'] = 0
-        for master_addr in self.cluster.master_addresses:
-            mesos_master_url = self._build_url(master_addr,
-                                               server_name='mesos',
-                                               path='/state')
-            master = jsonutils.loads(urlfetch.get(mesos_master_url))
-            if self._is_leader(master):
-                for slave in master['slaves']:
-                    self.data['mem_total'] += slave['resources']['mem']
-                    self.data['mem_used'] += slave['used_resources']['mem']
-                    self.data['cpu_total'] += slave['resources']['cpus']
-                    self.data['cpu_used'] += slave['used_resources']['cpus']
-                break
-
-    def compute_memory_util(self):
-        if self.data['mem_total'] == 0 or self.data['mem_used'] == 0:
-            return 0
-        else:
-            return self.data['mem_used'] * 100 / self.data['mem_total']
-
-    def compute_cpu_util(self):
-        if self.data['cpu_total'] == 0 or self.data['cpu_used'] == 0:
-            return 0
-        else:
-            return self.data['cpu_used'] * 100 / self.data['cpu_total']
diff --git a/contrib/drivers/dcos_centos_v1/scale_manager.py b/contrib/drivers/dcos_centos_v1/scale_manager.py
deleted file mode 100644
index 8b3846c7..00000000
--- a/contrib/drivers/dcos_centos_v1/scale_manager.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from magnum.conductor.scale_manager import ScaleManager
-from marathon import MarathonClient
-
-
-class DcosScaleManager(ScaleManager):
-
-    def __init__(self, context, osclient, cluster):
-        super(DcosScaleManager, self).__init__(context, osclient, cluster)
-
-    def _get_hosts_with_container(self, context, cluster):
-        marathon_client = MarathonClient(
-            'http://' + cluster.api_address + '/marathon/')
-        hosts = set()
-        for task in marathon_client.list_tasks():
-            hosts.add(task.host)
-
-        return hosts
diff --git a/contrib/drivers/dcos_centos_v1/template_def.py b/contrib/drivers/dcos_centos_v1/template_def.py
deleted file mode 100644
index 41de7fbe..00000000
--- a/contrib/drivers/dcos_centos_v1/template_def.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from magnum.drivers.heat import dcos_centos_template_def as dctd
-
-
-class DcosCentosVMTemplateDefinition(dctd.DcosCentosTemplateDefinition):
-    """DC/OS template for Centos VM."""
-
-    @property
-    def driver_module_path(self):
-        return __name__[:__name__.rindex('.')]
-
-    @property
-    def template_path(self):
-        return os.path.join(os.path.dirname(os.path.realpath(__file__)),
-                            'templates/dcoscluster.yaml')
diff --git a/contrib/drivers/dcos_centos_v1/templates/dcoscluster.yaml b/contrib/drivers/dcos_centos_v1/templates/dcoscluster.yaml
deleted file mode 100644
index d413f376..00000000
--- a/contrib/drivers/dcos_centos_v1/templates/dcoscluster.yaml
+++ /dev/null
@@ -1,674 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
-  This template will boot a DC/OS cluster with one or more masters
-  (as specified by number_of_masters, default is 1) and one or more slaves
-  (as specified by the number_of_slaves parameter, which
-  defaults to 1).
-
-parameters:
-
-  cluster_name:
-    type: string
-    description: human readable name for the DC/OS cluster
-    default: my-cluster
-
-  number_of_masters:
-    type: number
-    description: how many DC/OS masters to spawn initially
-    default: 1
-
-  # In DC/OS there are two types of slave nodes, public and private.
-  # Public slave nodes have external access; private slave nodes do not.
-  # Magnum supports only one type of slave node, and the cluster template
-  # properties are deliberately left unchanged, so all slave nodes are
-  # created as private agents.
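One aside before the remaining parameters: the DcosScaleManager shown earlier reduces to a single Marathon query. A hedged sketch of the same lookup, where api_address stands in for the cluster's API endpoint:

    from marathon import MarathonClient

    def hosts_with_containers(api_address):
        # Every Marathon task reports the agent it runs on; the set of
        # those agents is the set of nodes that should not be removed
        # when scaling down.
        client = MarathonClient('http://' + api_address + '/marathon/')
        return set(task.host for task in client.list_tasks())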
- number_of_slaves: - type: number - description: how many DC/OS agents or slaves to spawn initially - default: 1 - - master_flavor: - type: string - default: m1.medium - description: flavor to use when booting the master servers - - slave_flavor: - type: string - default: m1.medium - description: flavor to use when booting the slave servers - - server_image: - type: string - default: centos-dcos - description: glance image used to boot the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - default: public - - fixed_network: - type: string - description: uuid/name of an existing network to use to provision machines - default: "" - - fixed_subnet: - type: string - description: uuid/name of an existing subnet to use to provision machines - default: "" - - fixed_network_cidr: - type: string - description: network range for fixed ip network - default: 10.0.0.0/24 - - dns_nameserver: - type: string - description: address of a dns nameserver reachable in your environment - - http_proxy: - type: string - description: http proxy address for docker - default: "" - - https_proxy: - type: string - description: https proxy address for docker - default: "" - - no_proxy: - type: string - description: no proxies for docker - default: "" - - ###################################################################### - # - # Rexray Configuration - # - - trustee_domain_id: - type: string - description: domain id of the trustee - default: "" - - trustee_user_id: - type: string - description: user id of the trustee - default: "" - - trustee_username: - type: string - description: username of the trustee - default: "" - - trustee_password: - type: string - description: password of the trustee - default: "" - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - default: "" - hidden: true - - ###################################################################### - # - # Rexray Configuration - # - - volume_driver: - type: string - description: volume driver to use for container storage - default: "" - - username: - type: string - description: user name - - tenant_name: - type: string - description: > - tenant_name is used to isolate access to cloud resources - - domain_name: - type: string - description: > - domain is to define the administrative boundaries for management - of Keystone entities - - region_name: - type: string - description: a logically separate section of the cluster - - rexray_preempt: - type: string - description: > - enables any host to take control of a volume irrespective of whether - other hosts are using the volume - default: "false" - - auth_url: - type: string - description: url for keystone - - slaves_to_remove: - type: comma_delimited_list - description: > - List of slaves to be removed when doing an update. Individual slave may - be referenced several ways: (1) The resource name (e.g.['1', '3']), - (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should - be empty when doing a create. 
- default: [] - - wait_condition_timeout: - type: number - description: > - timeout for the Wait Conditions - default: 6000 - - password: - type: string - description: > - user password, not set in current implementation, only used to - fill in for DC/OS config file - default: - password - hidden: true - - ###################################################################### - # - # DC/OS parameters - # - - # cluster_name - - exhibitor_storage_backend: - type: string - default: "static" - - exhibitor_zk_hosts: - type: string - default: "" - - exhibitor_zk_path: - type: string - default: "" - - aws_access_key_id: - type: string - default: "" - - aws_region: - type: string - default: "" - - aws_secret_access_key: - type: string - default: "" - - exhibitor_explicit_keys: - type: string - default: "" - - s3_bucket: - type: string - default: "" - - s3_prefix: - type: string - default: "" - - exhibitor_azure_account_name: - type: string - default: "" - - exhibitor_azure_account_key: - type: string - default: "" - - exhibitor_azure_prefix: - type: string - default: "" - - # master_discovery default set to "static" - # If --master-lb-enabled is specified, - # master_discovery will be set to "master_http_loadbalancer" - master_discovery: - type: string - default: "static" - - # master_list - - # exhibitor_address - - # num_masters - - #################################################### - # Networking - - dcos_overlay_enable: - type: string - default: "" - constraints: - - allowed_values: - - "true" - - "false" - - "" - - dcos_overlay_config_attempts: - type: string - default: "" - - dcos_overlay_mtu: - type: string - default: "" - - dcos_overlay_network: - type: string - default: "" - - dns_search: - type: string - description: > - This parameter specifies a space-separated list of domains that - are tried when an unqualified domain is entered - default: "" - - # resolvers - - # use_proxy - - #################################################### - # Performance and Tuning - - check_time: - type: string - default: "true" - constraints: - - allowed_values: - - "true" - - "false" - - docker_remove_delay: - type: number - default: 1 - - gc_delay: - type: number - default: 2 - - log_directory: - type: string - default: "/genconf/logs" - - process_timeout: - type: number - default: 120 - - #################################################### - # Security And Authentication - - oauth_enabled: - type: string - default: "true" - constraints: - - allowed_values: - - "true" - - "false" - - telemetry_enabled: - type: string - default: "true" - constraints: - - allowed_values: - - "true" - - "false" - -resources: - - ###################################################################### - # - # network resources. allocate a network and router for our server. - # - - network: - type: ../../common/templates/network.yaml - properties: - existing_network: {get_param: fixed_network} - existing_subnet: {get_param: fixed_subnet} - private_network_cidr: {get_param: fixed_network_cidr} - dns_nameserver: {get_param: dns_nameserver} - external_network: {get_param: external_network} - - api_lb: - type: lb.yaml - properties: - fixed_subnet: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - - ###################################################################### - # - # security groups. we need to permit network traffic of various - # sorts. 
- # - - secgroup: - type: secgroup.yaml - - ###################################################################### - # - # resources that expose the IPs of either the dcos master or a given - # LBaaS pool depending on whether LBaaS is enabled for the cluster. - # - - api_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_attr: [api_lb, floating_address]} - pool_private_ip: {get_attr: [api_lb, address]} - master_public_ip: {get_attr: [dcos_masters, resource.0.dcos_master_external_ip]} - master_private_ip: {get_attr: [dcos_masters, resource.0.dcos_master_ip]} - - ###################################################################### - # - # Master SoftwareConfig. - # - - write_params_master: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: fragments/write-heat-params.sh} - inputs: - - name: HTTP_PROXY - type: String - - name: HTTPS_PROXY - type: String - - name: NO_PROXY - type: String - - name: AUTH_URL - type: String - - name: USERNAME - type: String - - name: PASSWORD - type: String - - name: TENANT_NAME - type: String - - name: VOLUME_DRIVER - type: String - - name: REGION_NAME - type: String - - name: DOMAIN_NAME - type: String - - name: REXRAY_PREEMPT - type: String - - name: CLUSTER_NAME - type: String - - name: EXHIBITOR_STORAGE_BACKEND - type: String - - name: EXHIBITOR_ZK_HOSTS - type: String - - name: EXHIBITOR_ZK_PATH - type: String - - name: AWS_ACCESS_KEY_ID - type: String - - name: AWS_REGION - type: String - - name: AWS_SECRET_ACCESS_KEY - type: String - - name: EXHIBITOR_EXPLICIT_KEYS - type: String - - name: S3_BUCKET - type: String - - name: S3_PREFIX - type: String - - name: EXHIBITOR_AZURE_ACCOUNT_NAME - type: String - - name: EXHIBITOR_AZURE_ACCOUNT_KEY - type: String - - name: EXHIBITOR_AZURE_PREFIX - type: String - - name: MASTER_DISCOVERY - type: String - - name: MASTER_LIST - type: String - - name: EXHIBITOR_ADDRESS - type: String - - name: NUM_MASTERS - type: String - - name: DCOS_OVERLAY_ENABLE - type: String - - name: DCOS_OVERLAY_CONFIG_ATTEMPTS - type: String - - name: DCOS_OVERLAY_MTU - type: String - - name: DCOS_OVERLAY_NETWORK - type: String - - name: DNS_SEARCH - type: String - - name: RESOLVERS - type: String - - name: CHECK_TIME - type: String - - name: DOCKER_REMOVE_DELAY - type: String - - name: GC_DELAY - type: String - - name: LOG_DIRECTORY - type: String - - name: PROCESS_TIMEOUT - type: String - - name: OAUTH_ENABLED - type: String - - name: TELEMETRY_ENABLED - type: String - - name: ROLES - type: String - - ###################################################################### - # - # DC/OS configuration SoftwareConfig. - # Configuration files are readered and injected into instance. - # - - dcos_config: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: fragments/configure-dcos.sh} - - ###################################################################### - # - # Master SoftwareDeployment. 
- # - - write_params_master_deployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - config: {get_resource: write_params_master} - servers: {get_attr: [dcos_masters, attributes, dcos_server_id]} - input_values: - HTTP_PROXY: {get_param: http_proxy} - HTTPS_PROXY: {get_param: https_proxy} - NO_PROXY: {get_param: no_proxy} - AUTH_URL: {get_param: auth_url} - USERNAME: {get_param: username} - PASSWORD: {get_param: password} - TENANT_NAME: {get_param: tenant_name} - VOLUME_DRIVER: {get_param: volume_driver} - REGION_NAME: {get_param: region_name} - DOMAIN_NAME: {get_param: domain_name} - REXRAY_PREEMPT: {get_param: rexray_preempt} - CLUSTER_NAME: {get_param: cluster_name} - EXHIBITOR_STORAGE_BACKEND: {get_param: exhibitor_storage_backend} - EXHIBITOR_ZK_HOSTS: {get_param: exhibitor_zk_hosts} - EXHIBITOR_ZK_PATH: {get_param: exhibitor_zk_path} - AWS_ACCESS_KEY_ID: {get_param: aws_access_key_id} - AWS_REGION: {get_param: aws_region} - AWS_SECRET_ACCESS_KEY: {get_param: aws_secret_access_key} - EXHIBITOR_EXPLICIT_KEYS: {get_param: exhibitor_explicit_keys} - S3_BUCKET: {get_param: s3_bucket} - S3_PREFIX: {get_param: s3_prefix} - EXHIBITOR_AZURE_ACCOUNT_NAME: {get_param: exhibitor_azure_account_name} - EXHIBITOR_AZURE_ACCOUNT_KEY: {get_param: exhibitor_azure_account_key} - EXHIBITOR_AZURE_PREFIX: {get_param: exhibitor_azure_prefix} - MASTER_DISCOVERY: {get_param: master_discovery} - MASTER_LIST: {list_join: [' ', {get_attr: [dcos_masters, dcos_master_ip]}]} - EXHIBITOR_ADDRESS: {get_attr: [api_lb, address]} - NUM_MASTERS: {get_param: number_of_masters} - DCOS_OVERLAY_ENABLE: {get_param: dcos_overlay_enable} - DCOS_OVERLAY_CONFIG_ATTEMPTS: {get_param: dcos_overlay_config_attempts} - DCOS_OVERLAY_MTU: {get_param: dcos_overlay_mtu} - DCOS_OVERLAY_NETWORK: {get_param: dcos_overlay_network} - DNS_SEARCH: {get_param: dns_search} - RESOLVERS: {get_param: dns_nameserver} - CHECK_TIME: {get_param: check_time} - DOCKER_REMOVE_DELAY: {get_param: docker_remove_delay} - GC_DELAY: {get_param: gc_delay} - LOG_DIRECTORY: {get_param: log_directory} - PROCESS_TIMEOUT: {get_param: process_timeout} - OAUTH_ENABLED: {get_param: oauth_enabled} - TELEMETRY_ENABLED: {get_param: telemetry_enabled} - ROLES: master - - dcos_config_deployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: - - write_params_master_deployment - properties: - config: {get_resource: dcos_config} - servers: {get_attr: [dcos_masters, attributes, dcos_server_id]} - - ###################################################################### - # - # DC/OS masters. This is a resource group that will create - # masters. 
- # - - dcos_masters: - type: OS::Heat::ResourceGroup - depends_on: - - network - properties: - count: {get_param: number_of_masters} - resource_def: - type: dcosmaster.yaml - properties: - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - master_flavor: {get_param: master_flavor} - external_network: {get_param: external_network} - fixed_network: {get_attr: [network, fixed_network]} - fixed_subnet: {get_attr: [network, fixed_subnet]} - secgroup_base_id: {get_attr: [secgroup, secgroup_base_id]} - secgroup_dcos_id: {get_attr: [secgroup, secgroup_dcos_id]} - api_pool_80_id: {get_attr: [api_lb, pool_80_id]} - api_pool_443_id: {get_attr: [api_lb, pool_443_id]} - api_pool_8080_id: {get_attr: [api_lb, pool_8080_id]} - api_pool_5050_id: {get_attr: [api_lb, pool_5050_id]} - api_pool_2181_id: {get_attr: [api_lb, pool_2181_id]} - api_pool_8181_id: {get_attr: [api_lb, pool_8181_id]} - - ###################################################################### - # - # DC/OS slaves. This is a resource group that will initially - # create public or private slaves, - # and needs to be manually scaled. - # - - dcos_slaves: - type: OS::Heat::ResourceGroup - depends_on: - - network - properties: - count: {get_param: number_of_slaves} - removal_policies: [{resource_list: {get_param: slaves_to_remove}}] - resource_def: - type: dcosslave.yaml - properties: - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - slave_flavor: {get_param: slave_flavor} - fixed_network: {get_attr: [network, fixed_network]} - fixed_subnet: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - wait_condition_timeout: {get_param: wait_condition_timeout} - secgroup_base_id: {get_attr: [secgroup, secgroup_base_id]} - # DC/OS params - auth_url: {get_param: auth_url} - username: {get_param: username} - password: {get_param: password} - tenant_name: {get_param: tenant_name} - volume_driver: {get_param: volume_driver} - region_name: {get_param: region_name} - domain_name: {get_param: domain_name} - rexray_preempt: {get_param: rexray_preempt} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - cluster_name: {get_param: cluster_name} - exhibitor_storage_backend: {get_param: exhibitor_storage_backend} - exhibitor_zk_hosts: {get_param: exhibitor_zk_hosts} - exhibitor_zk_path: {get_param: exhibitor_zk_path} - aws_access_key_id: {get_param: aws_access_key_id} - aws_region: {get_param: aws_region} - aws_secret_access_key: {get_param: aws_secret_access_key} - exhibitor_explicit_keys: {get_param: exhibitor_explicit_keys} - s3_bucket: {get_param: s3_bucket} - s3_prefix: {get_param: s3_prefix} - exhibitor_azure_account_name: {get_param: exhibitor_azure_account_name} - exhibitor_azure_account_key: {get_param: exhibitor_azure_account_key} - exhibitor_azure_prefix: {get_param: exhibitor_azure_prefix} - master_discovery: {get_param: master_discovery} - master_list: {list_join: [' ', {get_attr: [dcos_masters, dcos_master_ip]}]} - exhibitor_address: {get_attr: [api_lb, address]} - num_masters: {get_param: number_of_masters} - dcos_overlay_enable: {get_param: dcos_overlay_enable} - dcos_overlay_config_attempts: {get_param: dcos_overlay_config_attempts} - dcos_overlay_mtu: {get_param: dcos_overlay_mtu} - dcos_overlay_network: {get_param: dcos_overlay_network} - dns_search: {get_param: dns_search} - resolvers: {get_param: dns_nameserver} - check_time: {get_param: check_time} - docker_remove_delay: 
{get_param: docker_remove_delay}
-      gc_delay: {get_param: gc_delay}
-      log_directory: {get_param: log_directory}
-      process_timeout: {get_param: process_timeout}
-      oauth_enabled: {get_param: oauth_enabled}
-      telemetry_enabled: {get_param: telemetry_enabled}
-
-outputs:
-
-  api_address:
-    value: {get_attr: [api_address_lb_switch, public_ip]}
-    description: >
-      This is the API endpoint of the DC/OS master. Use this to access
-      the DC/OS API from outside the cluster.
-
-  dcos_master_private:
-    value: {get_attr: [dcos_masters, dcos_master_ip]}
-    description: >
-      This is a list of the "private" addresses of all the DC/OS masters.
-
-  dcos_master:
-    value: {get_attr: [dcos_masters, dcos_master_external_ip]}
-    description: >
-      This is the "public" ip address of the DC/OS master server. Use this
-      address to log in to the DC/OS master via ssh or to access the
-      DC/OS API from outside the cluster.
-
-  dcos_slaves_private:
-    value: {get_attr: [dcos_slaves, dcos_slave_ip]}
-    description: >
-      This is a list of the "private" addresses of all the DC/OS slaves.
-
-  dcos_slaves:
-    value: {get_attr: [dcos_slaves, dcos_slave_external_ip]}
-    description: >
-      This is a list of the "public" addresses of all the DC/OS slaves.
diff --git a/contrib/drivers/dcos_centos_v1/templates/dcosmaster.yaml b/contrib/drivers/dcos_centos_v1/templates/dcosmaster.yaml
deleted file mode 100644
index f46dee4a..00000000
--- a/contrib/drivers/dcos_centos_v1/templates/dcosmaster.yaml
+++ /dev/null
@@ -1,161 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
-  This is a nested stack that defines a single DC/OS master. This stack is
-  included by a ResourceGroup resource in the parent template
-  (dcoscluster.yaml).
-
-parameters:
-
-  server_image:
-    type: string
-    description: glance image used to boot the server
-
-  master_flavor:
-    type: string
-    description: flavor to use when booting the server
-
-  ssh_key_name:
-    type: string
-    description: name of ssh key to be provisioned on our server
-
-  external_network:
-    type: string
-    description: uuid/name of a network to use for floating ip addresses
-
-  fixed_network:
-    type: string
-    description: Network from which to allocate fixed addresses.
-
-  fixed_subnet:
-    type: string
-    description: Subnet from which to allocate fixed addresses.
-
-  secgroup_base_id:
-    type: string
-    description: ID of the security group for base.
-
-  secgroup_dcos_id:
-    type: string
-    description: ID of the security group for the DC/OS master.
-
-  api_pool_80_id:
-    type: string
-    description: ID of the load balancer pool for HTTP.
-
-  api_pool_443_id:
-    type: string
-    description: ID of the load balancer pool for HTTPS.
-
-  api_pool_8080_id:
-    type: string
-    description: ID of the load balancer pool for Marathon.
-
-  api_pool_5050_id:
-    type: string
-    description: ID of the load balancer pool for the Mesos master.
-
-  api_pool_2181_id:
-    type: string
-    description: ID of the load balancer pool for ZooKeeper.
-
-  api_pool_8181_id:
-    type: string
-    description: ID of the load balancer pool for Exhibitor.
-
-resources:
-
-  ######################################################################
-  #
-  # DC/OS master server.
-  #
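For quick reference, the six pool parameters above map master ports to DC/OS services; summarized as a Python table (service names come from the parameter descriptions, the Admin Router attribution for 80/443 is an assumption based on DC/OS's standard port layout):

    # Load balancer pools wired to each DC/OS master.
    DCOS_MASTER_POOLS = {
        80: 'HTTP (Admin Router)',
        443: 'HTTPS (Admin Router)',
        8080: 'Marathon',
        5050: 'Mesos master',
        2181: 'ZooKeeper',
        8181: 'Exhibitor',
    }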
-
-  dcos_master:
-    type: OS::Nova::Server
-    properties:
-      image: {get_param: server_image}
-      flavor: {get_param: master_flavor}
-      key_name: {get_param: ssh_key_name}
-      user_data_format: SOFTWARE_CONFIG
-      networks:
-        - port: {get_resource: dcos_master_eth0}
-
-  dcos_master_eth0:
-    type: OS::Neutron::Port
-    properties:
-      network: {get_param: fixed_network}
-      security_groups:
-        - {get_param: secgroup_base_id}
-        - {get_param: secgroup_dcos_id}
-      fixed_ips:
-        - subnet: {get_param: fixed_subnet}
-      replacement_policy: AUTO
-
-  dcos_master_floating:
-    type: Magnum::Optional::DcosMaster::Neutron::FloatingIP
-    properties:
-      floating_network: {get_param: external_network}
-      port_id: {get_resource: dcos_master_eth0}
-
-  api_pool_80_member:
-    type: Magnum::Optional::Neutron::LBaaS::PoolMember
-    properties:
-      pool: {get_param: api_pool_80_id}
-      address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]}
-      subnet: {get_param: fixed_subnet}
-      protocol_port: 80
-
-  api_pool_443_member:
-    type: Magnum::Optional::Neutron::LBaaS::PoolMember
-    properties:
-      pool: {get_param: api_pool_443_id}
-      address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]}
-      subnet: {get_param: fixed_subnet}
-      protocol_port: 443
-
-  api_pool_8080_member:
-    type: Magnum::Optional::Neutron::LBaaS::PoolMember
-    properties:
-      pool: {get_param: api_pool_8080_id}
-      address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]}
-      subnet: {get_param: fixed_subnet}
-      protocol_port: 8080
-
-  api_pool_5050_member:
-    type: Magnum::Optional::Neutron::LBaaS::PoolMember
-    properties:
-      pool: {get_param: api_pool_5050_id}
-      address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]}
-      subnet: {get_param: fixed_subnet}
-      protocol_port: 5050
-
-  api_pool_2181_member:
-    type: Magnum::Optional::Neutron::LBaaS::PoolMember
-    properties:
-      pool: {get_param: api_pool_2181_id}
-      address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]}
-      subnet: {get_param: fixed_subnet}
-      protocol_port: 2181
-
-  api_pool_8181_member:
-    type: Magnum::Optional::Neutron::LBaaS::PoolMember
-    properties:
-      pool: {get_param: api_pool_8181_id}
-      address: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]}
-      subnet: {get_param: fixed_subnet}
-      protocol_port: 8181
-
-outputs:
-
-  dcos_master_ip:
-    value: {get_attr: [dcos_master_eth0, fixed_ips, 0, ip_address]}
-    description: >
-      This is the "private" address of the DC/OS master node.
-
-  dcos_master_external_ip:
-    value: {get_attr: [dcos_master_floating, floating_ip_address]}
-    description: >
-      This is the "public" address of the DC/OS master node.
-
-  dcos_server_id:
-    value: {get_resource: dcos_master}
-    description: >
-      This is the logical id of the DC/OS master node.
diff --git a/contrib/drivers/dcos_centos_v1/templates/dcosslave.yaml b/contrib/drivers/dcos_centos_v1/templates/dcosslave.yaml
deleted file mode 100644
index 088f0b08..00000000
--- a/contrib/drivers/dcos_centos_v1/templates/dcosslave.yaml
+++ /dev/null
@@ -1,338 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
-  This is a nested stack that defines a single DC/OS slave. This stack is
-  included by a ResourceGroup resource in the parent template
-  (dcoscluster.yaml).
-
-parameters:
-
-  server_image:
-    type: string
-    description: glance image used to boot the server
-
-  slave_flavor:
-    type: string
-    description: flavor to use when booting the server
-
-  ssh_key_name:
-    type: string
-    description: name of ssh key to be provisioned on our server
-
-  external_network:
-    type: string
-    description: uuid/name of a network to use for floating ip addresses
-
-  wait_condition_timeout:
-    type: number
-    description: >
-      timeout for the Wait Conditions
-
-  http_proxy:
-    type: string
-    description: http proxy address for docker
-
-  https_proxy:
-    type: string
-    description: https proxy address for docker
-
-  no_proxy:
-    type: string
-    description: no proxies for docker
-
-  auth_url:
-    type: string
-    description: >
-      url for DC/OS to authenticate before sending request
-
-  username:
-    type: string
-    description: user name
-
-  password:
-    type: string
-    description: >
-      user password, not set in current implementation, only used to
-      fill in the DC/OS config file
-    hidden: true
-
-  tenant_name:
-    type: string
-    description: >
-      tenant_name is used to isolate access to Compute resources
-
-  volume_driver:
-    type: string
-    description: volume driver to use for container storage
-
-  region_name:
-    type: string
-    description: a logically separate section of the cluster
-
-  domain_name:
-    type: string
-    description: >
-      domain is to define the administrative boundaries for management
-      of Keystone entities
-
-  fixed_network:
-    type: string
-    description: Network from which to allocate fixed addresses.
-
-  fixed_subnet:
-    type: string
-    description: Subnet from which to allocate fixed addresses.
-
-  secgroup_base_id:
-    type: string
-    description: ID of the security group for base.
-
-  rexray_preempt:
-    type: string
-    description: >
-      enables any host to take control of a volume irrespective of whether
-      other hosts are using the volume
-
-  ######################################################################
-  #
-  # DC/OS parameters
-  #
-  cluster_name:
-    type: string
-    description: human readable name for the DC/OS cluster
-    default: my-cluster
-
-  exhibitor_storage_backend:
-    type: string
-
-  exhibitor_zk_hosts:
-    type: string
-
-  exhibitor_zk_path:
-    type: string
-
-  aws_access_key_id:
-    type: string
-
-  aws_region:
-    type: string
-
-  aws_secret_access_key:
-    type: string
-
-  exhibitor_explicit_keys:
-    type: string
-
-  s3_bucket:
-    type: string
-
-  s3_prefix:
-    type: string
-
-  exhibitor_azure_account_name:
-    type: string
-
-  exhibitor_azure_account_key:
-    type: string
-
-  exhibitor_azure_prefix:
-    type: string
-
-  master_discovery:
-    type: string
-
-  master_list:
-    type: string
-
-  exhibitor_address:
-    type: string
-    default: 127.0.0.1
-
-  num_masters:
-    type: number
-
-  dcos_overlay_enable:
-    type: string
-
-  dcos_overlay_config_attempts:
-    type: string
-
-  dcos_overlay_mtu:
-    type: string
-
-  dcos_overlay_network:
-    type: string
-
-  dns_search:
-    type: string
-
-  resolvers:
-    type: string
-
-  check_time:
-    type: string
-
-  docker_remove_delay:
-    type: number
-
-  gc_delay:
-    type: number
-
-  log_directory:
-    type: string
-
-  process_timeout:
-    type: number
-
-  oauth_enabled:
-    type: string
-
-  telemetry_enabled:
-    type: string
-
-resources:
-
-  slave_wait_handle:
-    type: OS::Heat::WaitConditionHandle
-
-  slave_wait_condition:
-    type: OS::Heat::WaitCondition
-    depends_on: dcos_slave
-    properties:
-      handle: {get_resource: slave_wait_handle}
-      timeout: {get_param: wait_condition_timeout}
-
-  secgroup_all_open:
-    type: OS::Neutron::SecurityGroup
-    properties:
rules: - - protocol: icmp - - protocol: tcp - - protocol: udp - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive. - # - - write_heat_params: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/write-heat-params.sh} - params: - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$AUTH_URL": {get_param: auth_url} - "$USERNAME": {get_param: username} - "$PASSWORD": {get_param: password} - "$TENANT_NAME": {get_param: tenant_name} - "$VOLUME_DRIVER": {get_param: volume_driver} - "$REGION_NAME": {get_param: region_name} - "$DOMAIN_NAME": {get_param: domain_name} - "$REXRAY_PREEMPT": {get_param: rexray_preempt} - "$CLUSTER_NAME": {get_param: cluster_name} - "$EXHIBITOR_STORAGE_BACKEND": {get_param: exhibitor_storage_backend} - "$EXHIBITOR_ZK_HOSTS": {get_param: exhibitor_zk_hosts} - "$EXHIBITOR_ZK_PATH": {get_param: exhibitor_zk_path} - "$AWS_ACCESS_KEY_ID": {get_param: aws_access_key_id} - "$AWS_REGION": {get_param: aws_region} - "$AWS_SECRET_ACCESS_KEY": {get_param: aws_secret_access_key} - "$EXHIBITOR_EXPLICIT_KEYS": {get_param: exhibitor_explicit_keys} - "$S3_BUCKET": {get_param: s3_bucket} - "$S3_PREFIX": {get_param: s3_prefix} - "$EXHIBITOR_AZURE_ACCOUNT_NAME": {get_param: exhibitor_azure_account_name} - "$EXHIBITOR_AZURE_ACCOUNT_KEY": {get_param: exhibitor_azure_account_key} - "$EXHIBITOR_AZURE_PREFIX": {get_param: exhibitor_azure_prefix} - "$MASTER_DISCOVERY": {get_param: master_discovery} - "$MASTER_LIST": {get_param: master_list} - "$EXHIBITOR_ADDRESS": {get_param: exhibitor_address} - "$NUM_MASTERS": {get_param: num_masters} - "$DCOS_OVERLAY_ENABLE": {get_param: dcos_overlay_enable} - "$DCOS_OVERLAY_CONFIG_ATTEMPTS": {get_param: dcos_overlay_config_attempts} - "$DCOS_OVERLAY_MTU": {get_param: dcos_overlay_mtu} - "$DCOS_OVERLAY_NETWORK": {get_param: dcos_overlay_network} - "$DNS_SEARCH": {get_param: dns_search} - "$RESOLVERS": {get_param: resolvers} - "$CHECK_TIME": {get_param: check_time} - "$DOCKER_REMOVE_DELAY": {get_param: docker_remove_delay} - "$GC_DELAY": {get_param: gc_delay} - "$LOG_DIRECTORY": {get_param: log_directory} - "$PROCESS_TIMEOUT": {get_param: process_timeout} - "$OAUTH_ENABLED": {get_param: oauth_enabled} - "$TELEMETRY_ENABLED": {get_param: telemetry_enabled} - "$ROLES": slave - - dcos_config: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/configure-dcos.sh} - - slave_wc_notify: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: | - #!/bin/bash -v - wc_notify --data-binary '{"status": "SUCCESS"}' - params: - wc_notify: {get_attr: [slave_wait_handle, curl_cli]} - - dcos_slave_init: - type: OS::Heat::MultipartMime - properties: - parts: - - config: {get_resource: write_heat_params} - - config: {get_resource: dcos_config} - - config: {get_resource: slave_wc_notify} - - ###################################################################### - # - # a single DC/OS slave. 
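Worth noting: unlike the master path, which feeds the script through SoftwareDeployment inputs, the slave's write_heat_params above relies on Heat's str_replace, which performs plain literal token substitution into the script template. In Python terms, roughly (the template and values here are illustrative):

    # str_replace semantics: every occurrence of a token in the template
    # is replaced by its value, with no quoting or escaping applied.
    template = 'ROLES="$ROLES"\nNUM_MASTERS="$NUM_MASTERS"\n'
    params = {'$ROLES': 'slave', '$NUM_MASTERS': '1'}
    for token, value in params.items():
        template = template.replace(token, value)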
-  #
-
-  dcos_slave:
-    type: OS::Nova::Server
-    properties:
-      image: {get_param: server_image}
-      flavor: {get_param: slave_flavor}
-      key_name: {get_param: ssh_key_name}
-      user_data_format: RAW
-      user_data: {get_resource: dcos_slave_init}
-      networks:
-        - port: {get_resource: dcos_slave_eth0}
-
-  dcos_slave_eth0:
-    type: OS::Neutron::Port
-    properties:
-      network: {get_param: fixed_network}
-      security_groups:
-        - get_resource: secgroup_all_open
-        - get_param: secgroup_base_id
-      fixed_ips:
-        - subnet: {get_param: fixed_subnet}
-
-  dcos_slave_floating:
-    type: Magnum::Optional::DcosSlave::Neutron::FloatingIP
-    properties:
-      floating_network: {get_param: external_network}
-      port_id: {get_resource: dcos_slave_eth0}
-
-outputs:
-
-  dcos_slave_ip:
-    value: {get_attr: [dcos_slave_eth0, fixed_ips, 0, ip_address]}
-    description: >
-      This is the "private" address of the DC/OS slave node.
-
-  dcos_slave_external_ip:
-    value: {get_attr: [dcos_slave_floating, floating_ip_address]}
-    description: >
-      This is the "public" address of the DC/OS slave node.
diff --git a/contrib/drivers/dcos_centos_v1/templates/fragments/configure-dcos.sh b/contrib/drivers/dcos_centos_v1/templates/fragments/configure-dcos.sh
deleted file mode 100644
index b5929824..00000000
--- a/contrib/drivers/dcos_centos_v1/templates/fragments/configure-dcos.sh
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/bin/bash
-
-. /etc/sysconfig/heat-params
-
-GENCONF_SCRIPT_DIR=/opt/dcos
-
-sudo mkdir -p $GENCONF_SCRIPT_DIR/genconf
-sudo chown -R centos $GENCONF_SCRIPT_DIR/genconf
-
-# Configure ip-detect
-cat > $GENCONF_SCRIPT_DIR/genconf/ip-detect <<EOF
-[ip-detect heredoc body lost in extraction]
-EOF
-
-# Configure DC/OS
-CONFIG_YAML_FILE=$GENCONF_SCRIPT_DIR/genconf/config.yaml
-echo "" > $CONFIG_YAML_FILE
-
-# cluster_name
-echo "cluster_name: $CLUSTER_NAME" >> $CONFIG_YAML_FILE
-
-# exhibitor_storage_backend
-if [ "static" == "$EXHIBITOR_STORAGE_BACKEND" ]; then
-    echo "exhibitor_storage_backend: static" >> $CONFIG_YAML_FILE
-elif [ "zookeeper" == "$EXHIBITOR_STORAGE_BACKEND" ]; then
-    echo "exhibitor_storage_backend: zookeeper" >> $CONFIG_YAML_FILE
-    echo "exhibitor_zk_hosts: $EXHIBITOR_ZK_HOSTS" >> $CONFIG_YAML_FILE
-    echo "exhibitor_zk_path: $EXHIBITOR_ZK_PATH" >> $CONFIG_YAML_FILE
-elif [ "aws_s3" == "$EXHIBITOR_STORAGE_BACKEND" ]; then
-    echo "exhibitor_storage_backend: aws_s3" >> $CONFIG_YAML_FILE
-    echo "aws_access_key_id: $AWS_ACCESS_KEY_ID" >> $CONFIG_YAML_FILE
-    echo "aws_region: $AWS_REGION" >> $CONFIG_YAML_FILE
-    echo "aws_secret_access_key: $AWS_SECRET_ACCESS_KEY" >> $CONFIG_YAML_FILE
-    echo "exhibitor_explicit_keys: $EXHIBITOR_EXPLICIT_KEYS" >> $CONFIG_YAML_FILE
-    echo "s3_bucket: $S3_BUCKET" >> $CONFIG_YAML_FILE
-    echo "s3_prefix: $S3_PREFIX" >> $CONFIG_YAML_FILE
-elif [ "azure" == "$EXHIBITOR_STORAGE_BACKEND" ]; then
-    echo "exhibitor_storage_backend: azure" >> $CONFIG_YAML_FILE
-    echo "exhibitor_azure_account_name: $EXHIBITOR_AZURE_ACCOUNT_NAME" >> $CONFIG_YAML_FILE
-    echo "exhibitor_azure_account_key: $EXHIBITOR_AZURE_ACCOUNT_KEY" >> $CONFIG_YAML_FILE
-    echo "exhibitor_azure_prefix: $EXHIBITOR_AZURE_PREFIX" >> $CONFIG_YAML_FILE
-fi
-
-# master_discovery
-if [ "static" == "$MASTER_DISCOVERY" ]; then
-    echo "master_discovery: static" >> $CONFIG_YAML_FILE
-    echo "master_list:" >> $CONFIG_YAML_FILE
-    for ip in $MASTER_LIST; do
-        echo "- ${ip}" >> $CONFIG_YAML_FILE
-    done
-elif [ "master_http_loadbalancer" == "$MASTER_DISCOVERY" ]; then
-    echo "master_discovery: master_http_loadbalancer" >> $CONFIG_YAML_FILE
-    echo "exhibitor_address: $EXHIBITOR_ADDRESS" >> $CONFIG_YAML_FILE
-    echo "num_masters: $NUM_MASTERS" >> $CONFIG_YAML_FILE
-    echo "master_list:" >> $CONFIG_YAML_FILE
-    for ip in
$MASTER_LIST; do - echo "- ${ip}" >> $CONFIG_YAML_FILE - done -fi - -#################################################### -# Networking - -# dcos_overlay_enable -if [ "false" == "$DCOS_OVERLAY_ENABLE" ]; then - echo "dcos_overlay_enable: false" >> $CONFIG_YAML_FILE -elif [ "true" == "$DCOS_OVERLAY_ENABLE" ]; then - echo "dcos_overlay_enable: true" >> $CONFIG_YAML_FILE - echo "dcos_overlay_config_attempts: $DCOS_OVERLAY_CONFIG_ATTEMPTS" >> $CONFIG_YAML_FILE - echo "dcos_overlay_mtu: $DCOS_OVERLAY_MTU" >> $CONFIG_YAML_FILE - echo "dcos_overlay_network:" >> $CONFIG_YAML_FILE - echo "$DCOS_OVERLAY_NETWORK" >> $CONFIG_YAML_FILE -fi - -# dns_search -if [ -n "$DNS_SEARCH" ]; then - echo "dns_search: $DNS_SEARCH" >> $CONFIG_YAML_FILE -fi - -# resolvers -echo "resolvers:" >> $CONFIG_YAML_FILE -for ip in $RESOLVERS; do -echo "- ${ip}" >> $CONFIG_YAML_FILE -done - -# use_proxy -if [ -n "$HTTP_PROXY" ] && [ -n "$HTTPS_PROXY" ]; then -echo "use_proxy: true" >> $CONFIG_YAML_FILE -echo "http_proxy: $HTTP_PROXY" >> $CONFIG_YAML_FILE -echo "https_proxy: $HTTPS_PROXY" >> $CONFIG_YAML_FILE -if [ -n "$NO_PROXY" ]; then - echo "no_proxy:" >> $CONFIG_YAML_FILE - for ip in $NO_PROXY; do - echo "- ${ip}" >> $CONFIG_YAML_FILE - done -fi -fi - -#################################################### -# Performance and Tuning - -# check_time -if [ "false" == "$CHECK_TIME" ]; then - echo "check_time: false" >> $CONFIG_YAML_FILE -fi - -# docker_remove_delay -if [ "1" != "$DOCKER_REMOVE_DELAY" ]; then - echo "docker_remove_delay: $DOCKER_REMOVE_DELAY" >> $CONFIG_YAML_FILE -fi - -# gc_delay -if [ "2" != "$GC_DELAY" ]; then - echo "gc_delay: $GC_DELAY" >> $CONFIG_YAML_FILE -fi - -# log_directory -if [ "/genconf/logs" != "$LOG_DIRECTORY" ]; then - echo "log_directory: $LOG_DIRECTORY" >> $CONFIG_YAML_FILE -fi - -# process_timeout -if [ "120" != "$PROCESS_TIMEOUT" ]; then - echo "process_timeout: $PROCESS_TIMEOUT" >> $CONFIG_YAML_FILE -fi - -#################################################### -# Security And Authentication - -# oauth_enabled -if [ "false" == "$OAUTH_ENABLED" ]; then - echo "oauth_enabled: false" >> $CONFIG_YAML_FILE -fi - -# telemetry_enabled -if [ "false" == "$TELEMETRY_ENABLED" ]; then - echo "telemetry_enabled: false" >> $CONFIG_YAML_FILE -fi - -#################################################### -# Rexray Configuration - -# NOTE: This feature is considered experimental: use it at your own risk. -# We might add, change, or delete any functionality as described in this document. 
-# See https://dcos.io/docs/1.8/usage/storage/external-storage/
-if [ "$VOLUME_DRIVER" == "rexray" ]; then
-
-    if [ "${AUTH_URL##*/}" == "v3" ]; then
-        extra_configs="domainName: $DOMAIN_NAME"
-    else
-        extra_configs=""
-    fi
-
-    echo "rexray_config:" >> $CONFIG_YAML_FILE
-    echo "  rexray:" >> $CONFIG_YAML_FILE
-    echo "    modules:" >> $CONFIG_YAML_FILE
-    echo "      default-admin:" >> $CONFIG_YAML_FILE
-    echo "        host: tcp://127.0.0.1:61003" >> $CONFIG_YAML_FILE
-    echo "    storageDrivers:" >> $CONFIG_YAML_FILE
-    echo "    - openstack" >> $CONFIG_YAML_FILE
-    echo "    volume:" >> $CONFIG_YAML_FILE
-    echo "      mount:" >> $CONFIG_YAML_FILE
-    echo "        preempt: $REXRAY_PREEMPT" >> $CONFIG_YAML_FILE
-    echo "  openstack:" >> $CONFIG_YAML_FILE
-    echo "    authUrl: $AUTH_URL" >> $CONFIG_YAML_FILE
-    echo "    username: $USERNAME" >> $CONFIG_YAML_FILE
-    echo "    password: $PASSWORD" >> $CONFIG_YAML_FILE
-    echo "    tenantName: $TENANT_NAME" >> $CONFIG_YAML_FILE
-    echo "    regionName: $REGION_NAME" >> $CONFIG_YAML_FILE
-    echo "    availabilityZoneName: nova" >> $CONFIG_YAML_FILE
-    echo "    $extra_configs" >> $CONFIG_YAML_FILE
-fi
-
-
-cd $GENCONF_SCRIPT_DIR
-sudo bash $GENCONF_SCRIPT_DIR/dcos_generate_config.sh --genconf
-
-cd $GENCONF_SCRIPT_DIR/genconf/serve
-sudo bash $GENCONF_SCRIPT_DIR/genconf/serve/dcos_install.sh --no-block-dcos-setup $ROLES
diff --git a/contrib/drivers/dcos_centos_v1/templates/fragments/write-heat-params.sh b/contrib/drivers/dcos_centos_v1/templates/fragments/write-heat-params.sh
deleted file mode 100644
index 9f7284b7..00000000
--- a/contrib/drivers/dcos_centos_v1/templates/fragments/write-heat-params.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/sh
-
-mkdir -p /etc/sysconfig
-cat > /etc/sysconfig/heat-params <<EOF
-[heredoc body lost in extraction: one VAR="$VAR" assignment per parameter
-passed in by the cluster templates]
-EOF
[The diff header and opening header lines of
contrib/drivers/k8s_opensuse_v1/image/config.sh were also lost in
extraction.]
-#               :
-# BELONGS TO    : Operating System images
-#               :
-# DESCRIPTION   : configuration script for SUSE based
-#               : operating systems
-#               :
-#               :
-# STATUS        : BETA
-#----------------
-#======================================
-# Functions...
-#--------------------------------------
-test -f /.kconfig && . /.kconfig
-test -f /.profile && . /.profile
-
-mkdir /var/lib/misc/reconfig_system
-
-#======================================
-# Greeting...
-#--------------------------------------
-echo "Configure image: [$name]..."
-
-#======================================
-# add missing fonts
-#--------------------------------------
-CONSOLE_FONT="lat9w-16.psfu"
-
-#======================================
-# prepare for setting root pw, timezone
-#--------------------------------------
-echo "** reset machine settings"
-sed -i 's/^root:[^:]*:/root:*:/' /etc/shadow
-rm /etc/machine-id
-rm /etc/localtime
-rm /var/lib/zypp/AnonymousUniqueId
-rm /var/lib/systemd/random-seed
-
-#======================================
-# SuSEconfig
-#--------------------------------------
-echo "** Running suseConfig..."
-suseConfig
-
-echo "** Running ldconfig..."
-/sbin/ldconfig
-
-#======================================
-# Setup baseproduct link
-#--------------------------------------
-suseSetupProduct
-
-#======================================
-# Specify default runlevel
-#--------------------------------------
-baseSetRunlevel 3
-
-#======================================
-# Add missing gpg keys to rpm
-#--------------------------------------
-suseImportBuildKey
-
-#======================================
-# Firewall Configuration
-#--------------------------------------
-echo '** Configuring firewall...'
-chkconfig SuSEfirewall2_init on -chkconfig SuSEfirewall2_setup on - -#====================================== -# Enable sshd -#-------------------------------------- -chkconfig sshd on - -#====================================== -# Remove doc files -#-------------------------------------- -baseStripDocs - -#====================================== -# remove rpms defined in config.xml in the image type=delete section -#-------------------------------------- -baseStripRPM - -#====================================== -# Sysconfig Update -#-------------------------------------- -echo '** Update sysconfig entries...' -baseUpdateSysConfig /etc/sysconfig/SuSEfirewall2 FW_CONFIGURATIONS_EXT sshd -baseUpdateSysConfig /etc/sysconfig/console CONSOLE_FONT "$CONSOLE_FONT" -# baseUpdateSysConfig /etc/sysconfig/snapper SNAPPER_CONFIGS root -if [[ "${kiwi_iname}" != *"OpenStack"* ]]; then - baseUpdateSysConfig /etc/sysconfig/network/dhcp DHCLIENT_SET_HOSTNAME yes -fi - -# true -#====================================== -# SSL Certificates Configuration -#-------------------------------------- -echo '** Rehashing SSL Certificates...' -update-ca-certificates - -if [ ! -s /var/log/zypper.log ]; then - > /var/log/zypper.log -fi - -# only for debugging -#systemctl enable debug-shell.service - -baseCleanMount - -exit 0 diff --git a/contrib/drivers/k8s_opensuse_v1/image/images.sh b/contrib/drivers/k8s_opensuse_v1/image/images.sh deleted file mode 100644 index e37aba73..00000000 --- a/contrib/drivers/k8s_opensuse_v1/image/images.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -#================ -# FILE : image.sh -#---------------- -# PROJECT : openSUSE KIWI Image System -# COPYRIGHT : (c) 2006 SUSE LINUX Products GmbH. All rights reserved -# : -# AUTHOR : Marcus Schaefer -# : -# BELONGS TO : Operating System images -# : -# DESCRIPTION : configuration script for SUSE based -# : operating systems -# : -# : -# STATUS : BETA -#---------------- - -test -f /.kconfig && . /.kconfig -test -f /.profile && . /.profile - -if [[ "${kiwi_iname}" = *"OpenStack"* ]]; then - # disable jeos-firstboot service - # We need to install it because it provides files required in the - # overlay for the image. However, the service itself is something that - # requires interaction on boot, which is not good for OpenStack, and the - # interaction actually doesn't bring any benefit in OpenStack. 
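The sed one-liner a few lines below is terse; what it does is append a datasource_list entry immediately after the mount_default_fields line of /etc/cloud/cloud.cfg. A hedged Python rendering of the same edit:

    # Equivalent of the sed append: insert a datasource_list line after
    # the line containing 'mount_default_fields' in cloud.cfg.
    path = '/etc/cloud/cloud.cfg'
    with open(path) as f:
        lines = f.readlines()
    out = []
    for line in lines:
        out.append(line)
        if 'mount_default_fields' in line:
            out.append('datasource_list: [ NoCloud, OpenStack, None ]\n')
    with open(path, 'w') as f:
        f.writelines(out)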
-    systemctl mask jeos-firstboot.service
-
-    # enable cloud-init services
-    suseInsertService cloud-init-local
-    suseInsertService cloud-init
-    suseInsertService cloud-config
-    suseInsertService cloud-final
-
-    echo '*** adjusting cloud.cfg for openstack'
-    sed -i -e '/mount_default_fields/{adatasource_list: [ NoCloud, OpenStack, None ]
-}' /etc/cloud/cloud.cfg
-fi
diff --git a/contrib/drivers/k8s_opensuse_v1/image/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.kiwi b/contrib/drivers/k8s_opensuse_v1/image/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.kiwi
deleted file mode 100644
index a36c2529..00000000
--- a/contrib/drivers/k8s_opensuse_v1/image/openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.kiwi
+++ /dev/null
@@ -1,160 +0,0 @@
-[The 160 lines of kiwi XML markup were lost in extraction. The surviving
-text fields were: author "SUSE Containers Team", contact
-docker-devel@suse.de, specification "Kubernetes openSUSE Leap 42.1 image
-for OpenStack Magnum", version 1.1.1, packagemanager zypper, and the
-openSUSE distribution/profile flags.]
diff --git a/contrib/drivers/k8s_opensuse_v1/setup.py b/contrib/drivers/k8s_opensuse_v1/setup.py
deleted file mode 100644
index ee14343c..00000000
--- a/contrib/drivers/k8s_opensuse_v1/setup.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2016 SUSE Linux GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import setuptools
-
-setuptools.setup(
-    name="k8s_opensuse_v1",
-    version="1.0",
-    packages=['k8s_opensuse_v1'],
-    package_data={
-        'k8s_opensuse_v1': ['templates/*', 'templates/fragments/*']
-    },
-    author="SUSE Linux GmbH",
-    author_email="opensuse-cloud@opensuse.org",
-    description="Magnum openSUSE Kubernetes driver",
-    license="Apache",
-    keywords="magnum opensuse driver",
-    entry_points={
-        'magnum.template_definitions': [
-            'k8s_opensuse_v1 = k8s_opensuse_v1:JeOSK8sTemplateDefinition'
-        ]
-    }
-)
diff --git a/contrib/drivers/k8s_opensuse_v1/template_def.py b/contrib/drivers/k8s_opensuse_v1/template_def.py
deleted file mode 100644
index 54e52960..00000000
--- a/contrib/drivers/k8s_opensuse_v1/template_def.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2016 Rackspace Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
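The entry_points stanza in setup.py above is how magnum discovers out-of-tree drivers: the group name 'magnum.template_definitions' comes straight from setup.py, while the loading sketch below is illustrative of the standard pkg_resources lookup rather than magnum's exact code:

    import pkg_resources

    # Iterate over installed 'magnum.template_definitions' entry points
    # and load each advertised template definition class.
    for ep in pkg_resources.iter_entry_points('magnum.template_definitions'):
        definition_cls = ep.load()  # e.g. JeOSK8sTemplateDefinition
        print(ep.name, definition_cls)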
- -import os - -import magnum.conf -from magnum.drivers.common import k8s_template_def -from magnum.drivers.common import template_def - -CONF = magnum.conf.CONF - - -class JeOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition): - """Kubernetes template for openSUSE/SLES JeOS VM.""" - - def __init__(self): - super(JeOSK8sTemplateDefinition, self).__init__() - self.add_parameter('docker_volume_size', - cluster_template_attr='docker_volume_size') - self.add_output('kube_minions', - cluster_attr='node_addresses') - self.add_output('kube_masters', - cluster_attr='master_addresses') - - def get_params(self, context, cluster_template, cluster, **kwargs): - extra_params = kwargs.pop('extra_params', {}) - - extra_params['username'] = context.user_name - extra_params['tenant_name'] = context.tenant - - return super(JeOSK8sTemplateDefinition, - self).get_params(context, cluster_template, cluster, - extra_params=extra_params, - **kwargs) - - def get_env_files(self, cluster_template, cluster): - env_files = [] - if cluster_template.master_lb_enabled: - env_files.append( - template_def.COMMON_ENV_PATH + 'with_master_lb.yaml') - else: - env_files.append( - template_def.COMMON_ENV_PATH + 'no_master_lb.yaml') - if cluster_template.floating_ip_enabled: - env_files.append( - template_def.COMMON_ENV_PATH + 'enable_floating_ip.yaml') - else: - env_files.append( - template_def.COMMON_ENV_PATH + 'disable_floating_ip.yaml') - - return env_files - - @property - def driver_module_path(self): - return __name__[:__name__.rindex('.')] - - @property - def template_path(self): - return os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'templates/kubecluster.yaml') diff --git a/contrib/drivers/k8s_opensuse_v1/templates/COPYING b/contrib/drivers/k8s_opensuse_v1/templates/COPYING deleted file mode 100644 index d6456956..00000000 --- a/contrib/drivers/k8s_opensuse_v1/templates/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/contrib/drivers/k8s_opensuse_v1/templates/README.md b/contrib/drivers/k8s_opensuse_v1/templates/README.md deleted file mode 100644 index efa24c45..00000000 --- a/contrib/drivers/k8s_opensuse_v1/templates/README.md +++ /dev/null @@ -1,129 +0,0 @@ -A Kubernetes cluster with Heat -============================== - -These [Heat][] templates will deploy a [Kubernetes][] cluster that -supports automatic scaling based on CPU load. - -[heat]: https://wiki.openstack.org/wiki/Heat -[kubernetes]: https://github.com/GoogleCloudPlatform/kubernetes - -The cluster uses [Flannel][] to provide an overlay network connecting -pods deployed on different minions. - -[flannel]: https://github.com/coreos/flannel - -## Requirements - -### Guest image - -These templates will work with either openSUSE JeOS or SLES JeOS images -that are prepared for Docker and Kubernetes. - -You can enable docker registry v2 by setting the "registry_enabled" -parameter to "true". 
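You will need to upload such an image to glance before creating the stack.
A minimal sketch of building and uploading it (assuming the `kiwi-ng`
tooling and illustrative names and paths; the exact invocation depends on
your kiwi version) might look like:

    # build the image from the driver's kiwi description
    cd contrib/drivers/k8s_opensuse_v1/image
    sudo kiwi-ng system build --description . --target-dir /tmp/k8s-image

    # upload the result to glance; the name must match server_image below
    openstack image create openSUSELeap42.1-jeos-k8s \
        --disk-format qcow2 --container-format bare \
        --file /tmp/k8s-image/<built-image>.qcow2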
-
-## Creating the stack
-
-Create an environment file `local.yaml` with parameters specific to
-your environment:
-
-    parameters:
-      ssh_key_name: testkey
-      external_network: public
-      dns_nameserver: 192.168.200.1
-      server_image: openSUSELeap42.1-jeos-k8s
-      registry_enabled: true
-      registry_username: username
-      registry_password: password
-      registry_domain: domain
-      registry_trust_id: trust_id
-      registry_auth_url: auth_url
-      registry_region: region
-      registry_container: container
-
-And then create the stack, referencing that environment file:
-
-    heat stack-create -f kubecluster.yaml -e local.yaml my-kube-cluster
-
-You must provide values for:
-
-- `ssh_key_name`
-- `server_image`
-
-If you enable docker registry v2, you must provide values for:
-
-- `registry_username`
-- `registry_password`
-- `registry_domain`
-- `registry_trust_id`
-- `registry_auth_url`
-- `registry_region`
-- `registry_container`
-
-## Interacting with Kubernetes
-
-You can get the IP address of the Kubernetes master using the `heat
-output-show` command:
-
-    $ heat output-show my-kube-cluster kube_masters
-    "192.168.200.86"
-
-You can ssh into that server as the `minion` user:
-
-    $ ssh minion@192.168.200.86
-
-And once logged in you can run `kubectl`, etc.:
-
-    $ kubectl get minions
-    NAME            LABELS        STATUS
-    10.0.0.4        <none>        Ready
-
-You can log into your minions using the `minion` user as well. You
-can get a list of minion addresses by running:
-
-    $ heat output-show my-kube-cluster kube_minions
-    [
-      "192.168.200.182"
-    ]
-
-You can get the docker registry v2 address:
-
-    $ heat output-show my-kube-cluster registry_address
-    localhost:5000
-
-## Testing
-
-The templates install an example Pod and Service description into
-`/etc/kubernetes/examples`. You can deploy this with the following
-commands:
-
-    $ kubectl create -f /etc/kubernetes/examples/web.service
-    $ kubectl create -f /etc/kubernetes/examples/web.pod
-
-This will deploy a minimal webserver and a service. You can use
-`kubectl get pods` and `kubectl get services` to see the results of
-these commands.
-
-## License
-
-Copyright 2016 SUSE Linux GmbH
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use these files except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-## Contributing
-
-Please submit bug reports and patches for review via Gerrit at
-https://review.openstack.org/. For more information, please refer
-to the following resources:
-
-* **Documentation:** http://docs.openstack.org/developer/magnum
-* **Source:** http://git.openstack.org/cgit/openstack/magnum
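The shell fragments that follow are delivered through the Heat templates as
multipart user-data and executed by cloud-init at boot. As a quick sanity
check that the proxy drop-in written by `add-proxy.sh` (next) took effect,
something like the following could be run on a booted node (a sketch,
assuming a systemd-managed docker service):

    # the docker unit should list proxy.conf among its drop-ins
    systemctl status docker | head -n 5

    # and HTTP_PROXY should appear in its effective environment
    systemctl show docker --property=Environment

    # the shell exports land in /etc/bashrc
    grep -E 'http_proxy|https_proxy|no_proxy' /etc/bashrc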
diff --git a/contrib/drivers/k8s_opensuse_v1/templates/fragments/add-proxy.sh b/contrib/drivers/k8s_opensuse_v1/templates/fragments/add-proxy.sh
deleted file mode 100644
index 002096da..00000000
--- a/contrib/drivers/k8s_opensuse_v1/templates/fragments/add-proxy.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/sh
-
-. /etc/sysconfig/heat-params
-
-DOCKER_PROXY_CONF=/etc/systemd/system/docker.service.d/proxy.conf
-BASH_RC=/etc/bashrc
-
-mkdir -p /etc/systemd/system/docker.service.d
-
-if [ -n "$HTTP_PROXY" ]; then
-    # write a systemd drop-in so the docker daemon inherits the proxy
-    cat > $DOCKER_PROXY_CONF <<EOF
-[Service]
-Environment=HTTP_PROXY=$HTTP_PROXY
-EOF
-
-    systemctl daemon-reload
-    systemctl --no-block restart docker.service
-
-    if [ -f "$BASH_RC" ]; then
-        echo "declare -x http_proxy=$HTTP_PROXY" >> $BASH_RC
-    else
-        echo "File $BASH_RC does not exist, not setting http_proxy"
-    fi
-fi
-
-if [ -n "$HTTPS_PROXY" ]; then
-    if [ -f "$BASH_RC" ]; then
-        echo "declare -x https_proxy=$HTTPS_PROXY" >> $BASH_RC
-    else
-        echo "File $BASH_RC does not exist, not setting https_proxy"
-    fi
-fi
-
-if [ -n "$NO_PROXY" ]; then
-    if [ -f "$BASH_RC" ]; then
-        echo "declare -x no_proxy=$NO_PROXY" >> $BASH_RC
-    else
-        echo "File $BASH_RC does not exist, not setting no_proxy"
-    fi
-fi
diff --git a/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-docker.sh b/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-docker.sh
deleted file mode 100644
index 72d25209..00000000
--- a/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-docker.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/sh
-
-. /etc/sysconfig/heat-params
-
-echo "stopping docker"
-systemctl stop docker
-ip link del docker0
-
-if [ "$NETWORK_DRIVER" == "flannel" ]; then
-
-    FLANNEL_ENV=/run/flannel/subnet.env
-
-    attempts=60
-    while [[ ! -f $FLANNEL_ENV && $attempts != 0 ]]; do
-        echo "waiting for file $FLANNEL_ENV"
-        sleep 1
-        let attempts--
-    done
-
-    source $FLANNEL_ENV
-
-    if [ -z "$FLANNEL_SUBNET" ] || [ -z "$FLANNEL_MTU" ]; then
-        echo "ERROR: missing required environment variables." >&2
-        exit 1
-    fi
-
-    if grep -q DOCKER_NETWORK_OPTIONS /etc/sysconfig/docker; then
-        sed -i '
-            /^DOCKER_NETWORK_OPTIONS=/ s|=.*|="--bip='"$FLANNEL_SUBNET"' --mtu='"$FLANNEL_MTU"'"|
-        ' /etc/sysconfig/docker
-    else
-        echo "DOCKER_NETWORK_OPTIONS=\"--bip=$FLANNEL_SUBNET --mtu=$FLANNEL_MTU\"" >> /etc/sysconfig/docker
-    fi
-
-    sed -i '
-        /^DOCKER_OPTS=/ s/=.*/="--storage-driver=btrfs"/
-    ' /etc/sysconfig/docker
-fi
-
-DOCKER_DEV=/dev/disk/by-id/virtio-${DOCKER_VOLUME:0:20}
-
-attempts=60
-while [[ ! -b $DOCKER_DEV && $attempts != 0 ]]; do
-    echo "waiting for disk $DOCKER_DEV"
-    sleep 0.5
-    udevadm trigger
-    let attempts--
-done
-
-if ! [ -b $DOCKER_DEV ]; then
-    echo "ERROR: device $DOCKER_DEV does not exist" >&2
-    exit 1
-fi
-
-mkfs.btrfs $DOCKER_DEV
-
-mount $DOCKER_DEV /var/lib/docker
-
-# update /etc/fstab with DOCKER_DEV
-if ! grep -q /var/lib/docker /etc/fstab; then
-    grep /var/lib/docker /etc/mtab | head -1 >> /etc/fstab
-fi
-
-# make sure we pick up any modified unit files
-systemctl daemon-reload
-
-echo "activating docker service"
-systemctl enable docker
-
-echo "starting docker service"
-systemctl --no-block start docker
diff --git a/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-etcd.sh b/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-etcd.sh
deleted file mode 100644
index aefd1e79..00000000
--- a/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-etcd.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-
-.
/etc/sysconfig/heat-params - -myip="$KUBE_NODE_IP" - -sed -i ' - /ETCD_NAME=/c ETCD_NAME="'$myip'" - /ETCD_DATA_DIR=/c ETCD_DATA_DIR="/var/lib/etcd/default.etcd" - /ETCD_LISTEN_CLIENT_URLS=/c ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379" - /ETCD_LISTEN_PEER_URLS=/c ETCD_LISTEN_PEER_URLS="http://'$myip':2380" - /ETCD_ADVERTISE_CLIENT_URLS=/c ETCD_ADVERTISE_CLIENT_URLS="http://'$myip':2379" - /ETCD_INITIAL_ADVERTISE_PEER_URLS=/c ETCD_INITIAL_ADVERTISE_PEER_URLS="http://'$myip':2380" - /ETCD_DISCOVERY=/c ETCD_DISCOVERY="'$ETCD_DISCOVERY_URL'" -' /etc/sysconfig/etcd - -echo "activating etcd service" -systemctl enable etcd - -echo "starting etcd service" -systemctl --no-block start etcd diff --git a/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-flanneld-master.sh b/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-flanneld-master.sh deleted file mode 100644 index 0e2c9c44..00000000 --- a/contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-flanneld-master.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/sh - -. /etc/sysconfig/heat-params - -if [ "$NETWORK_DRIVER" != "flannel" ]; then - exit 0 -fi - -FLANNEL_ETCD="http://127.0.0.1:2379" -FLANNEL_JSON=/etc/sysconfig/flannel-network.json -FLANNELD_CONFIG=/etc/sysconfig/flanneld - -sed -i ' - /^FLANNEL_ETCD=/ s/=.*/="http:\/\/127.0.0.1:2379"/ - /^#FLANNEL_OPTIONS=/ s//FLANNEL_OPTIONS="-iface eth0 --ip-masq"/ -' /etc/sysconfig/flanneld - -cat >> /etc/sysconfig/flanneld < $FLANNEL_JSON <> /etc/sysconfig/flanneld </dev/null -fi - -# Setting correct permissions for Kubernetes files -chown -R kube:kube /var/lib/kubernetes - -KUBE_API_ARGS="--service-account-key-file=$SERVICE_ACCOUNT_KEY --runtime_config=api/all=true" - -if [ "$TLS_DISABLED" == "True" ]; then - sed -i ' - /^# KUBE_API_PORT=/ s|.*|KUBE_API_PORT="--port=8080 --insecure-port='"$KUBE_API_PORT"'"| - ' /etc/kubernetes/apiserver -else - # insecure port is used internaly - sed -i ' - /^# KUBE_API_PORT=/ s|.*|KUBE_API_PORT="--port=8080 --insecure-port=8080 --secure-port='"$KUBE_API_PORT"'"| - ' /etc/kubernetes/apiserver - KUBE_API_ARGS="$KUBE_API_ARGS --tls_cert_file=/etc/kubernetes/ssl/server.crt" - KUBE_API_ARGS="$KUBE_API_ARGS --tls_private_key_file=/etc/kubernetes/ssl/server.key" - KUBE_API_ARGS="$KUBE_API_ARGS --client_ca_file=/etc/kubernetes/ssl/ca.crt" -fi - -sed -i ' - /^KUBE_ALLOW_PRIV=/ s|=.*|="--allow-privileged='"$KUBE_ALLOW_PRIV"'"| -' /etc/kubernetes/config - -sed -i ' - /^KUBE_API_ADDRESS=/ s|=.*|="--advertise-address='"$KUBE_NODE_IP"' --insecure-bind-address=0.0.0.0 --bind_address=0.0.0.0"| - /^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"| - /^KUBE_API_ARGS=/ s|=.*|="--service-account-key-file='"$SERVICE_ACCOUNT_KEY"' --runtime-config=api\/all=true"| - /^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd-servers=http:\/\/127.0.0.1:2379"/ - /^KUBE_ADMISSION_CONTROL=/ s/=.*/="--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota"/ -' /etc/kubernetes/apiserver - -cat >> /etc/kubernetes/apiserver <> /etc/kubernetes/controller-manager < /etc/sysconfig/kubernetes_openstack_config <> /etc/environment < $CA_CERT - -# Create config for client's csr -cat > ${cert_dir}/client.conf < ${CLIENT_CERT} - -chmod 700 ${cert_dir} -chmod 600 ${cert_dir}/* -chown -R kube:kube ${cert_dir} - -sed -i ' - s|CA_CERT|'"$CA_CERT"'| - s|CLIENT_CERT|'"$CLIENT_CERT"'| - s|CLIENT_KEY|'"$CLIENT_KEY"'| - s|KUBE_MASTER_URI|'"$KUBE_MASTER_URI"'| -' /etc/kubernetes/kubeconfig.yaml diff --git 
a/contrib/drivers/k8s_opensuse_v1/templates/fragments/make-cert.sh b/contrib/drivers/k8s_opensuse_v1/templates/fragments/make-cert.sh deleted file mode 100644 index a63bd313..00000000 --- a/contrib/drivers/k8s_opensuse_v1/templates/fragments/make-cert.sh +++ /dev/null @@ -1,127 +0,0 @@ -#!/bin/sh - -# Copyright 2014 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -. /etc/sysconfig/heat-params - -set -o errexit -set -o nounset -set -o pipefail - -if [ "$TLS_DISABLED" == "True" ]; then - exit 0 -fi - -if [[ -z "${KUBE_NODE_PUBLIC_IP}" ]]; then - KUBE_NODE_PUBLIC_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) -fi -if [[ -z "${KUBE_NODE_IP}" ]]; then - KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) -fi - -sans="IP:${KUBE_NODE_PUBLIC_IP},IP:${KUBE_NODE_IP}" -if [ "${KUBE_NODE_PUBLIC_IP}" != "${KUBE_API_PUBLIC_ADDRESS}" ] \ - && [ -n "${KUBE_API_PUBLIC_ADDRESS}" ]; then - sans="${sans},IP:${KUBE_API_PUBLIC_ADDRESS}" -fi -if [ "${KUBE_NODE_IP}" != "${KUBE_API_PRIVATE_ADDRESS}" ] \ - && [ -n "${KUBE_API_PRIVATE_ADDRESS}" ]; then - sans="${sans},IP:${KUBE_API_PRIVATE_ADDRESS}" -fi -MASTER_HOSTNAME=${MASTER_HOSTNAME:-} -if [[ -n "${MASTER_HOSTNAME}" ]]; then - sans="${sans},DNS:${MASTER_HOSTNAME}" -fi -sans="${sans},IP:127.0.0.1" - -cert_dir=/etc/kubernetes/ssl - -mkdir -p "$cert_dir" - -CA_CERT=$cert_dir/ca.crt -SERVER_CERT=$cert_dir/server.crt -SERVER_CSR=$cert_dir/server.csr -SERVER_KEY=$cert_dir/server.key - -#Get a token by user credentials and trust -auth_json=$(cat << EOF -{ - "auth": { - "identity": { - "methods": [ - "password" - ], - "password": { - "user": { - "id": "$TRUSTEE_USER_ID", - "password": "$TRUSTEE_PASSWORD" - } - } - }, - "scope": { - "OS-TRUST:trust": { - "id": "$TRUST_ID" - } - } - } -} -EOF -) - -#trust is introduced in Keystone v3 version -AUTH_URL=${AUTH_URL/v2.0/v3} -content_type='Content-Type: application/json' -url="$AUTH_URL/auth/tokens" -USER_TOKEN=`curl -s -i -X POST -H "$content_type" -d "$auth_json" $url \ - | grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'` - -# Get CA certificate for this cluster -curl -X GET \ - -H "X-Auth-Token: $USER_TOKEN" \ - $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT} - -# Create config for server's csr -cat > ${cert_dir}/server.conf < ${SERVER_CERT} - -chmod 700 ${cert_dir} -chmod 600 ${cert_dir}/* -chown -R kube:kube ${cert_dir} diff --git a/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-master.yaml b/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-master.yaml deleted file mode 100644 index b0b9de18..00000000 --- a/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-master.yaml +++ /dev/null @@ -1,38 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -write_files: - - path: /etc/sysconfig/heat-params - owner: "root:root" - permissions: "0644" - content: | - KUBE_NODE_IP="$KUBE_NODE_IP" 
- KUBE_API_PORT="$KUBE_API_PORT" - KUBE_ALLOW_PRIV="$KUBE_ALLOW_PRIV" - KUBE_MASTER_IPS="$KUBE_MASTER_IPS" - KUBE_MINION_IPS="$KUBE_MINION_IPS" - KUBE_NODE_PUBLIC_IP="$KUBE_NODE_PUBLIC_IP" - KUBE_NODE_IP="$KUBE_NODE_IP" - KUBE_NODE_NAME="$KUBE_NODE_NAME" - NETWORK_DRIVER="$NETWORK_DRIVER" - FLANNEL_NETWORK_CIDR="$FLANNEL_NETWORK_CIDR" - FLANNEL_NETWORK_SUBNETLEN="$FLANNEL_NETWORK_SUBNETLEN" - FLANNEL_NETWORK_SUBNET_MIN="$FLANNEL_NETWORK_SUBNET_MIN" - FLANNEL_NETWORK_SUBNET_MAX="$FLANNEL_NETWORK_SUBNET_MAX" - FLANNEL_BACKEND="$FLANNEL_BACKEND" - PORTAL_NETWORK_CIDR="$PORTAL_NETWORK_CIDR" - ETCD_DISCOVERY_URL="$ETCD_DISCOVERY_URL" - AUTH_URL="$AUTH_URL" - USERNAME="$USERNAME" - PASSWORD="$PASSWORD" - TENANT_NAME="$TENANT_NAME" - CLUSTER_SUBNET="$CLUSTER_SUBNET" - TLS_DISABLED="$TLS_DISABLED" - KUBE_VERSION="$KUBE_VERSION" - CLUSTER_UUID="$CLUSTER_UUID" - MAGNUM_URL="$MAGNUM_URL" - SYSTEM_PODS_INITIAL_DELAY="$SYSTEM_PODS_INITIAL_DELAY" - SYSTEM_PODS_TIMEOUT="$SYSTEM_PODS_TIMEOUT" - TRUSTEE_USER_ID="$TRUSTEE_USER_ID" - TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" - TRUST_ID="$TRUST_ID" - DOMAIN_NAME="$DOMAIN_NAME" diff --git a/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-minion.yaml b/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-minion.yaml deleted file mode 100644 index 2ee0ce81..00000000 --- a/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-heat-params-minion.yaml +++ /dev/null @@ -1,38 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -write_files: - - path: /etc/sysconfig/heat-params - owner: "root:root" - permissions: "0644" - content: | - KUBE_ALLOW_PRIV="$KUBE_ALLOW_PRIV" - KUBE_MASTER_IP="$KUBE_MASTER_IP" - KUBE_API_PORT="$KUBE_API_PORT" - KUBE_NODE_IP="$KUBE_NODE_IP" - ETCD_SERVER_IP="$ETCD_SERVER_IP" - DOCKER_VOLUME="$DOCKER_VOLUME" - NETWORK_DRIVER="$NETWORK_DRIVER" - REGISTRY_ENABLED="$REGISTRY_ENABLED" - REGISTRY_PORT="$REGISTRY_PORT" - REGISTRY_AUTH_URL="$REGISTRY_AUTH_URL" - REGISTRY_REGION="$REGISTRY_REGION" - REGISTRY_USERNAME="$REGISTRY_USERNAME" - REGISTRY_PASSWORD="$REGISTRY_PASSWORD" - REGISTRY_DOMAIN="$REGISTRY_DOMAIN" - REGISTRY_TRUST_ID="$REGISTRY_TRUST_ID" - REGISTRY_CONTAINER="$REGISTRY_CONTAINER" - REGISTRY_INSECURE="$REGISTRY_INSECURE" - REGISTRY_CHUNKSIZE="$REGISTRY_CHUNKSIZE" - TLS_DISABLED="$TLS_DISABLED" - KUBE_VERSION="$KUBE_VERSION" - CLUSTER_UUID="$CLUSTER_UUID" - MAGNUM_URL="$MAGNUM_URL" - HTTP_PROXY="$HTTP_PROXY" - HTTPS_PROXY="$HTTPS_PROXY" - NO_PROXY="$NO_PROXY" - AUTH_URL="$AUTH_URL" - TRUSTEE_USER_ID="$TRUSTEE_USER_ID" - TRUSTEE_USERNAME="$TRUSTEE_USERNAME" - TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" - TRUSTEE_DOMAIN_ID="$TRUSTEE_DOMAIN_ID" - TRUST_ID="$TRUST_ID" diff --git a/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-kubeconfig.yaml b/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-kubeconfig.yaml deleted file mode 100644 index 78930881..00000000 --- a/contrib/drivers/k8s_opensuse_v1/templates/fragments/write-kubeconfig.yaml +++ /dev/null @@ -1,25 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -write_files: - - path: /etc/kubernetes/kubeconfig.yaml - owner: "root:root" - permissions: "0644" - content: | - apiVersion: v1 - kind: Config - users: - - name: kubeclient - user: - client-certificate: CLIENT_CERT - client-key: CLIENT_KEY - clusters: - - name: kubernetes - cluster: - certificate-authority: CA_CERT - server: KUBE_MASTER_URI - contexts: - - context: - cluster: kubernetes - user: kubeclient - name: service-account-context - 
current-context: service-account-context diff --git a/contrib/drivers/k8s_opensuse_v1/templates/kubecluster.yaml b/contrib/drivers/k8s_opensuse_v1/templates/kubecluster.yaml deleted file mode 100644 index 19fda3ca..00000000 --- a/contrib/drivers/k8s_opensuse_v1/templates/kubecluster.yaml +++ /dev/null @@ -1,664 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This template will boot a Kubernetes cluster with one or more - minions (as specified by the number_of_minions parameter, which - defaults to 1). - -parameters: - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - default: public - - server_image: - type: string - description: glance image used to boot the server - - master_flavor: - type: string - default: m1.small - description: flavor to use when booting the server for master nodes - - minion_flavor: - type: string - default: m1.small - description: flavor to use when booting the server for minions - - dns_nameserver: - type: string - description: address of a DNS nameserver reachable in your environment - default: 8.8.8.8 - - number_of_masters: - type: number - description: how many kubernetes masters to spawn - default: 1 - - number_of_minions: - type: number - description: how many kubernetes minions to spawn - default: 1 - - fixed_network_cidr: - type: string - description: network range for fixed ip network - default: 10.0.0.0/24 - - portal_network_cidr: - type: string - description: > - address range used by kubernetes for service portals - default: 10.254.0.0/16 - - network_driver: - type: string - description: network driver to use for instantiating container networks - default: flannel - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - default: 10.100.0.0/16 - - flannel_network_subnetlen: - type: number - description: size of subnet assigned to each minion - default: 24 - - flannel_network_subnet_min: - type: string - description: minimum subnet - default: 10.100.50.0 - - flannel_network_subnet_max: - type: string - description: maximum subnet - default: 10.100.199.0 - - flannel_backend: - type: string - description: > - specify the backend for flannel, default udp backend - default: "udp" - constraints: - - allowed_values: ["udp", "vxlan", "host-gw"] - - system_pods_initial_delay: - type: number - description: > - health check, time to wait for system pods (podmaster, scheduler) to boot - (in seconds) - default: 30 - - system_pods_timeout: - type: number - description: > - health check, timeout for system pods (podmaster, scheduler) to answer. - (in seconds) - default: 5 - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers. - default: "true" - constraints: - - allowed_values: ["true", "false"] - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - default: 0 - - wait_condition_timeout: - type: number - description: > - timeout for the Wait Conditions - default: 2400 - - minions_to_remove: - type: comma_delimited_list - description: > - List of minions to be removed when doing an update. Individual minion may - be referenced several ways: (1) The resource name (e.g. ['1', '3']), - (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should - be empty when doing an create. 
-    default: []
-
-  discovery_url:
-    type: string
-    description: >
-      Discovery URL used for bootstrapping the etcd cluster.
-
-  registry_enabled:
-    type: boolean
-    description: >
-      Indicates whether the docker registry is enabled.
-    default: false
-
-  registry_port:
-    type: number
-    description: port of registry service
-    default: 5000
-
-  registry_username:
-    type: string
-    description: username used by docker registry
-    default: "username"
-
-  registry_password:
-    type: string
-    description: password used by docker registry
-    default: "password"
-    hidden: true
-
-  registry_domain:
-    type: string
-    description: domain used by docker registry
-    default: "domain"
-
-  registry_trust_id:
-    type: string
-    description: trust_id used by docker registry
-    default: "trust_id"
-    hidden: true
-
-  registry_auth_url:
-    type: string
-    description: auth_url for keystone
-    default: "auth_url"
-
-  registry_region:
-    type: string
-    description: region of swift service
-    default: "region"
-
-  registry_container:
-    type: string
-    description: >
-      name of swift container which docker registry stores images in
-    default: "container"
-
-  registry_insecure:
-    type: boolean
-    description: >
-      indicates whether to skip TLS verification between registry and backend storage
-    default: true
-
-  registry_chunksize:
-    type: number
-    description: >
-      size of the data segments for the swift dynamic large objects
-    default: 5242880
-
-  auth_url:
-    type: string
-    description: >
-      url for kubernetes to authenticate before sending requests to neutron;
-      must be v2 since the kubernetes backend only supports v2 at this point
-
-  kube_version:
-    type: string
-    description: version of kubernetes used for kubernetes cluster
-    default: v1.3.7
-
-  volume_driver:
-    type: string
-    description: volume driver to use for container storage
-    default: ""
-
-  username:
-    type: string
-    description: >
-      user account
-
-  password:
-    type: string
-    description: >
-      user password, not set in current implementation, only used to
-      fill in for Kubernetes config file
-    default:
-      ChangeMe
-    hidden: true
-
-  tenant_name:
-    type: string
-    description: >
-      tenant name
-
-  loadbalancing_protocol:
-    type: string
-    description: >
-      The protocol which is used for load balancing. If you want to change
-      tls_disabled option to 'True', please change this to "HTTP".
-    default: TCP
-    constraints:
-      - allowed_values: ["TCP", "HTTP"]
-
-  tls_disabled:
-    type: boolean
-    description: whether or not to disable TLS
-    default: False
-
-  kubernetes_port:
-    type: number
-    description: >
-      The port which is used by kube-apiserver to provide the Kubernetes
-      service.
- default: 6443 - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - http_proxy: - type: string - description: http proxy address for docker - default: "" - - https_proxy: - type: string - description: https proxy address for docker - default: "" - - no_proxy: - type: string - description: no proxies for docker - default: "" - - trustee_domain_id: - type: string - description: domain id of the trustee - default: "" - - trustee_user_id: - type: string - description: user id of the trustee - default: "" - - trustee_username: - type: string - description: username of the trustee - default: "" - - trustee_password: - type: string - description: password of the trustee - default: "" - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - default: "" - hidden: true - - domain_name: - type: string - description: domain_name - default: "" - -resources: - - ###################################################################### - # - # network resources. allocate a network and router for our server. - # Important: the Load Balancer feature in Kubernetes requires that - # the name for the fixed_network must be "private" for the - # address lookup in Kubernetes to work properly - # - - fixed_network: - type: OS::Neutron::Net - properties: - name: private - - fixed_subnet: - type: OS::Neutron::Subnet - properties: - cidr: {get_param: fixed_network_cidr} - network: {get_resource: fixed_network} - dns_nameservers: - - {get_param: dns_nameserver} - - extrouter: - type: OS::Neutron::Router - properties: - external_gateway_info: - network: {get_param: external_network} - - extrouter_inside: - type: OS::Neutron::RouterInterface - properties: - router_id: {get_resource: extrouter} - subnet: {get_resource: fixed_subnet} - - ###################################################################### - # - # security groups. we need to permit network traffic of various - # sorts. - # - - secgroup_base: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - secgroup_kube_master: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: tcp - port_range_min: 7080 - port_range_max: 7080 - - protocol: tcp - port_range_min: 8080 - port_range_max: 8080 - - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - - protocol: tcp - port_range_min: 6443 - port_range_max: 6443 - - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - - protocol: udp - port_range_min: 8285 - port_range_max: 8285 - - protocol: udp - port_range_min: 8472 - port_range_max: 8472 - - secgroup_kube_minion: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - - protocol: udp - - ###################################################################### - # - # load balancers. 
- # - - api_loadbalancer: - type: Magnum::Optional::Neutron::LBaaS::LoadBalancer - properties: - vip_subnet: {get_resource: fixed_subnet} - - api_listener: - type: Magnum::Optional::Neutron::LBaaS::Listener - properties: - loadbalancer: {get_resource: api_loadbalancer} - protocol: {get_param: loadbalancing_protocol} - protocol_port: {get_param: kubernetes_port} - - api_pool: - type: Magnum::Optional::Neutron::LBaaS::Pool - properties: - lb_algorithm: ROUND_ROBIN - listener: {get_resource: api_listener} - protocol: {get_param: loadbalancing_protocol} - - api_monitor: - type: Magnum::Optional::Neutron::LBaaS::HealthMonitor - properties: - type: TCP - delay: 5 - max_retries: 5 - timeout: 5 - pool: { get_resource: api_pool } - - api_pool_floating: - type: Magnum::Optional::Neutron::FloatingIP - depends_on: - - extrouter_inside - properties: - floating_network: {get_param: external_network} - port_id: {get_attr: [api_loadbalancer, vip_port_id]} - - etcd_loadbalancer: - type: Magnum::Optional::Neutron::LBaaS::LoadBalancer - properties: - vip_subnet: {get_resource: fixed_subnet} - - etcd_listener: - type: Magnum::Optional::Neutron::LBaaS::Listener - properties: - loadbalancer: {get_resource: etcd_loadbalancer} - protocol: HTTP - protocol_port: 2379 - - etcd_pool: - type: Magnum::Optional::Neutron::LBaaS::Pool - properties: - lb_algorithm: ROUND_ROBIN - listener: {get_resource: etcd_listener} - protocol: HTTP - - etcd_monitor: - type: Magnum::Optional::Neutron::LBaaS::HealthMonitor - properties: - type: TCP - delay: 5 - max_retries: 5 - timeout: 5 - pool: { get_resource: etcd_pool } - - ###################################################################### - # - # resources that expose the IPs of either the kube master or a given - # LBaaS pool depending on whether LBaaS is enabled for the cluster. - # - - api_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_attr: [api_pool_floating, floating_ip_address]} - pool_private_ip: {get_attr: [api_loadbalancer, vip_address]} - master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} - master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} - - etcd_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_private_ip: {get_attr: [etcd_loadbalancer, vip_address]} - master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} - - ###################################################################### - # - # resources that expose the IPs of either floating ip or a given - # fixed ip depending on whether FloatingIP is enabled for the cluster. - # - - api_address_floating_switch: - type: Magnum::FloatingIPAddressSwitcher - properties: - public_ip: {get_attr: [api_address_lb_switch, public_ip]} - private_ip: {get_attr: [api_address_lb_switch, private_ip]} - - ###################################################################### - # - # kubernetes masters. This is a resource group that will create - # masters. 
-  #
-
-  kube_masters:
-    type: OS::Heat::ResourceGroup
-    depends_on:
-      - extrouter_inside
-    properties:
-      count: {get_param: number_of_masters}
-      resource_def:
-        type: kubemaster.yaml
-        properties:
-          api_public_address: {get_attr: [api_pool_floating, floating_ip_address]}
-          api_private_address: {get_attr: [api_loadbalancer, vip_address]}
-          ssh_key_name: {get_param: ssh_key_name}
-          server_image: {get_param: server_image}
-          master_flavor: {get_param: master_flavor}
-          external_network: {get_param: external_network}
-          kube_allow_priv: {get_param: kube_allow_priv}
-          wait_condition_timeout: {get_param: wait_condition_timeout}
-          network_driver: {get_param: network_driver}
-          flannel_backend: {get_param: flannel_backend}
-          flannel_network_cidr: {get_param: flannel_network_cidr}
-          flannel_network_subnetlen: {get_param: flannel_network_subnetlen}
-          flannel_network_subnet_min: {get_param: flannel_network_subnet_min}
-          flannel_network_subnet_max: {get_param: flannel_network_subnet_max}
-          system_pods_initial_delay: {get_param: system_pods_initial_delay}
-          system_pods_timeout: {get_param: system_pods_timeout}
-          portal_network_cidr: {get_param: portal_network_cidr}
-          discovery_url: {get_param: discovery_url}
-          cluster_uuid: {get_param: cluster_uuid}
-          magnum_url: {get_param: magnum_url}
-          http_proxy: {get_param: http_proxy}
-          https_proxy: {get_param: https_proxy}
-          no_proxy: {get_param: no_proxy}
-          kube_version: {get_param: kube_version}
-          fixed_network: {get_resource: fixed_network}
-          fixed_subnet: {get_resource: fixed_subnet}
-          api_pool_id: {get_resource: api_pool}
-          etcd_pool_id: {get_resource: etcd_pool}
-          auth_url: {get_param: auth_url}
-          username: {get_param: username}
-          password: {get_param: password}
-          tenant_name: {get_param: tenant_name}
-          kubernetes_port: {get_param: kubernetes_port}
-          tls_disabled: {get_param: tls_disabled}
-          secgroup_base_id: {get_resource: secgroup_base}
-          secgroup_kube_master_id: {get_resource: secgroup_kube_master}
-          kube_master_id: 'kube-master%index%'
-          kube_master_ports: { get_attr: [kube_master_ports, refs] }
-          kube_master_ips: {get_attr: [kube_master_ports, fixed_ip]}
-          kube_master_ips_list: { list_join: ["|", {get_attr: [kube_master_ports, fixed_ip]} ] }
-          kube_minion_ips_list: { list_join: ["|", {get_attr: [kube_minion_ports, fixed_ip]} ] }
-          trustee_user_id: {get_param: trustee_user_id}
-          trustee_password: {get_param: trustee_password}
-          trust_id: {get_param: trust_id}
-          domain_name: {get_param: domain_name}
-
-  ######################################################################
-  #
-  # kubernetes minions. This is a resource group that will initially
-  # create minions, and needs to be manually scaled.
- # - - kube_minions: - type: OS::Heat::ResourceGroup - depends_on: - - extrouter_inside - - kube_masters - properties: - count: {get_param: number_of_minions} - removal_policies: [{resource_list: {get_param: minions_to_remove}}] - resource_def: - type: kubeminion.yaml - properties: - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - minion_flavor: {get_param: minion_flavor} - fixed_network: {get_resource: fixed_network} - fixed_subnet: {get_resource: fixed_subnet} - network_driver: {get_param: network_driver} - flannel_network_cidr: {get_param: flannel_network_cidr} - kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]} - etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} - external_network: {get_param: external_network} - kube_allow_priv: {get_param: kube_allow_priv} - docker_volume_size: {get_param: docker_volume_size} - wait_condition_timeout: {get_param: wait_condition_timeout} - registry_enabled: {get_param: registry_enabled} - registry_port: {get_param: registry_port} - registry_username: {get_param: registry_username} - registry_password: {get_param: registry_password} - registry_domain: {get_param: registry_domain} - registry_trust_id: {get_param: registry_trust_id} - registry_auth_url: {get_param: registry_auth_url} - registry_region: {get_param: registry_region} - registry_container: {get_param: registry_container} - registry_insecure: {get_param: registry_insecure} - registry_chunksize: {get_param: registry_chunksize} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - kube_version: {get_param: kube_version} - kubernetes_port: {get_param: kubernetes_port} - tls_disabled: {get_param: tls_disabled} - secgroup_kube_minion_id: {get_resource: secgroup_kube_minion} - kube_minion_id: 'kube-minion%index%' - kube_minion_ports: { get_attr: [kube_minion_ports, refs] } - kube_minion_ips: {get_attr: [kube_minion_ports, fixed_ip]} - kube_master_ips_list: { list_join: ["|", {get_attr: [kube_master_ports, fixed_ip]} ] } - kube_minion_ips_list: { list_join: ["|", {get_attr: [kube_minion_ports, fixed_ip]} ] } - auth_url: {get_param: auth_url} - trustee_user_id: {get_param: trustee_user_id} - trustee_username: {get_param: trustee_username} - trustee_password: {get_param: trustee_password} - trustee_domain_id: {get_param: trustee_domain_id} - trust_id: {get_param: trust_id} - -outputs: - - api_address: - value: - str_replace: - template: api_ip_address - params: - api_ip_address: {get_attr: [api_address_floating_switch, ip_address]} - description: > - This is the API endpoint of the Kubernetes cluster. Use this to access - the Kubernetes API. - - registry_address: - value: - str_replace: - template: localhost:port - params: - port: {get_param: registry_port} - description: - This is the url of docker registry server where you can store docker - images. - - kube_masters: - value: {get_attr: [kube_masters, kube_master_external_ip]} - description: > - This is a list of the "public" IP addresses of all the Kubernetes masters. - Use these IP addresses to log in to the Kubernetes masters via ssh. - - kube_minions: - value: {get_attr: [kube_minions, kube_minion_ip]} - description: > - This is a list of the "private" IP addresses of all the Kubernetes minions. 
- - kube_minions_external: - value: {get_attr: [kube_minions, kube_minion_external_ip]} - description: > - This is a list of the "public" IP addresses of all the Kubernetes minions. - Use these IP addresses to log in to the Kubernetes minions via ssh. diff --git a/contrib/drivers/k8s_opensuse_v1/templates/kubemaster.yaml b/contrib/drivers/k8s_opensuse_v1/templates/kubemaster.yaml deleted file mode 100644 index a92069df..00000000 --- a/contrib/drivers/k8s_opensuse_v1/templates/kubemaster.yaml +++ /dev/null @@ -1,388 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a single Kubernetes master, This stack is - included by an ResourceGroup resource in the parent template - (kubecluster.yaml). - -parameters: - - server_image: - type: string - description: glance image used to boot the server - - master_flavor: - type: string - default: m1.small - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - default: lars - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - portal_network_cidr: - type: string - description: > - address range used by kubernetes for service portals - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers. - default: "false" - constraints: - - allowed_values: ["true", "false"] - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - - flannel_network_subnetlen: - type: number - description: size of subnet assigned to each master - - flannel_network_subnet_min: - type: string - description: minimum subnet - - flannel_network_subnet_max: - type: string - description: maximum subnet - - flannel_backend: - type: string - description: > - specify the backend for flannel, default udp backend - constraints: - - allowed_values: ["udp", "vxlan", "host-gw"] - - system_pods_initial_delay: - type: number - description: > - health check, time to wait for system pods (podmaster, scheduler) to boot - (in seconds) - default: 30 - - system_pods_timeout: - type: number - description: > - health check, timeout for system pods (podmaster, scheduler) to answer. - (in seconds) - default: 5 - - discovery_url: - type: string - description: > - Discovery URL used for bootstrapping the etcd cluster. - - tls_disabled: - type: boolean - description: whether or not to enable TLS - - kubernetes_port: - type: number - description: > - The port which are used by kube-apiserver to provide Kubernetes - service. - - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - api_public_address: - type: string - description: Public IP address of the Kubernetes master server. - default: "" - - api_private_address: - type: string - description: Private IP address of the Kubernetes master server. - default: "" - - http_proxy: - type: string - description: http proxy address for docker - - https_proxy: - type: string - description: https proxy address for docker - - no_proxy: - type: string - description: no proxies for docker - - fixed_network: - type: string - description: Network from which to allocate fixed addresses. 
- - fixed_subnet: - type: string - description: Subnet from which to allocate fixed addresses. - - network_driver: - type: string - description: network driver to use for instantiating container networks - - wait_condition_timeout: - type: number - description : > - timeout for the Wait Conditions - - secgroup_base_id: - type: string - description: ID of the security group for base. - - secgroup_kube_master_id: - type: string - description: ID of the security group for kubernetes master. - - api_pool_id: - type: string - description: ID of the load balancer pool of k8s API server. - - etcd_pool_id: - type: string - description: ID of the load balancer pool of etcd server. - - auth_url: - type: string - description: > - url for kubernetes to authenticate - - username: - type: string - description: > - user account - - password: - type: string - description: > - user password - - tenant_name: - type: string - description: > - tenant name - - kube_master_id: - type: string - description: ID of for kubernetes master. - - trustee_user_id: - type: string - description: user id of the trustee - - trustee_password: - type: string - description: password of the trustee - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - hidden: true - - domain_name: - type: string - description: domain name - -resources: - - master_wait_handle: - type: OS::Heat::WaitConditionHandle - - master_wait_condition: - type: OS::Heat::WaitCondition - depends_on: kube_master - properties: - handle: {get_resource: master_wait_handle} - timeout: {get_param: wait_condition_timeout} - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive. 
- # - - write_heat_params: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/write-heat-params-master.yaml} - params: - "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} - "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - "$KUBE_API_PORT": {get_param: kubernetes_port} - "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} - "$KUBE_MASTER_IPS": {get_param: kube_master_ips_list} - "$KUBE_MINION_IPS": {get_param: kube_minion_ips_list} - "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} - "$KUBE_NODE_IP": { "Fn::Select": [ { get_param: kube_master_index }, { get_param: kube_master_ips} ] } - "$KUBE_NODE_NAME": {get_param: kube_master_id} - "$NETWORK_DRIVER": {get_param: network_driver} - "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} - "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} - "$FLANNEL_NETWORK_SUBNET_MIN": {get_param: flannel_network_subnet_min} - "$FLANNEL_NETWORK_SUBNET_MAX": {get_param: flannel_network_subnet_max} - "$FLANNEL_BACKEND": {get_param: flannel_backend} - "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} - "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} - "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} - "$ETCD_DISCOVERY_URL": {get_param: discovery_url} - "$AUTH_URL": {get_param: auth_url} - "$USERNAME": {get_param: username} - "$PASSWORD": {get_param: password} - "$TENANT_NAME": {get_param: tenant_name} - "$CLUSTER_SUBNET": {get_param: fixed_subnet} - "$TLS_DISABLED": {get_param: tls_disabled} - "$KUBE_VERSION": {get_param: kube_version} - "$CLUSTER_UUID": {get_param: cluster_uuid} - "$MAGNUM_URL": {get_param: magnum_url} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$TRUSTEE_USER_ID": {get_param: trustee_user_id} - "$TRUSTEE_PASSWORD": {get_param: trustee_password} - "$TRUST_ID": {get_param: trust_id} - "$DOMAIN_NAME": {get_param: domain_name} - - make_cert: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/make-cert.sh} - - configure_etcd: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/configure-etcd.sh} - - configure_flanneld: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/configure-flanneld-master.sh} - - create_kubernetes_user: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/create-kubernetes-user.yaml} - - configure_kubernetes: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/configure-kubernetes-master.sh} - - add_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/add-proxy.sh} - - master_wc_notify: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: | - #!/bin/bash -v - wc_notify --data-binary '{"status": "SUCCESS"}' - params: - wc_notify: {get_attr: [master_wait_handle, curl_cli]} - - kube_master_init: - type: OS::Heat::MultipartMime - properties: - parts: - - config: {get_resource: write_heat_params} - - config: {get_resource: make_cert} - - config: {get_resource: configure_etcd} - - config: {get_resource: configure_flanneld} - - config: {get_resource: create_kubernetes_user} - - config: {get_resource: 
configure_kubernetes} - - config: {get_resource: add_proxy} - - config: {get_resource: master_wc_notify} - - ###################################################################### - # - # a single kubernetes master. - # - - kube_master: - type: OS::Nova::Server - properties: - name: {get_param: kube_master_id} - image: {get_param: server_image} - flavor: {get_param: master_flavor} - key_name: {get_param: ssh_key_name} - user_data_format: RAW - user_data: {get_resource: kube_master_init} - config_drive: true - networks: - - port: {get_resource: kube_master_eth0} - - kube_master_eth0: - type: OS::Neutron::Port - properties: - network: {get_param: fixed_network} - security_groups: - - {get_param: secgroup_base_id} - - {get_param: secgroup_kube_master_id} - fixed_ips: - - subnet: {get_param: fixed_subnet} - allowed_address_pairs: - - ip_address: {get_param: flannel_network_cidr} - replacement_policy: AUTO - - kube_master_floating: - type: Magnum::Optional::KubeMaster::Neutron::FloatingIP - properties: - floating_network: {get_param: external_network} - port_id: {get_resource: kube_master_eth0} - - api_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: api_pool_id} - address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - subnet: { get_param: fixed_subnet } - protocol_port: {get_param: kubernetes_port} - - etcd_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: etcd_pool_id} - address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - subnet: { get_param: fixed_subnet } - protocol_port: 2379 - -outputs: - - kube_master_ip: - value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - description: > - This is the "private" IP address of the Kubernetes master node. - - kube_master_external_ip: - value: {get_attr: [kube_master_floating, floating_ip_address]} - description: > - This is the "public" IP address of the Kubernetes master node. diff --git a/contrib/drivers/k8s_opensuse_v1/templates/kubeminion.yaml b/contrib/drivers/k8s_opensuse_v1/templates/kubeminion.yaml deleted file mode 100644 index 192338ab..00000000 --- a/contrib/drivers/k8s_opensuse_v1/templates/kubeminion.yaml +++ /dev/null @@ -1,402 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a single Kubernetes minion, This stack is - included by an AutoScalingGroup resource in the parent template - (kubecluster.yaml). - -parameters: - - server_image: - type: string - description: glance image used to boot the server - - minion_flavor: - type: string - default: m1.small - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - default: lars - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers. - default: "false" - constraints: - - allowed_values: ["true", "false"] - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - default: 0 - - tls_disabled: - type: boolean - description: whether or not to enable TLS - default: False - - kubernetes_port: - type: number - description: > - The port which are used by kube-apiserver to provide Kubernetes - service. 
-    default: 6443
-
-  cluster_uuid:
-    type: string
-    description: identifier for the cluster this template is generating
-
-  magnum_url:
-    type: string
-    description: endpoint to retrieve TLS certs from
-
-  kube_version:
-    type: string
-    description: version of kubernetes used for kubernetes cluster
-
-  kube_master_ip:
-    type: string
-    description: IP address of the Kubernetes master server.
-
-  etcd_server_ip:
-    type: string
-    description: IP address of the Etcd server.
-
-  fixed_network:
-    type: string
-    description: Network from which to allocate fixed addresses.
-
-  fixed_subnet:
-    type: string
-    description: Subnet from which to allocate fixed addresses.
-
-  network_driver:
-    type: string
-    description: network driver to use for instantiating container networks
-
-  flannel_network_cidr:
-    type: string
-    description: network range for flannel overlay network
-
-  wait_condition_timeout:
-    type: number
-    description: >
-      timeout for the Wait Conditions
-
-  http_proxy:
-    type: string
-    description: http proxy address for docker
-
-  https_proxy:
-    type: string
-    description: https proxy address for docker
-
-  no_proxy:
-    type: string
-    description: no proxies for docker
-
-  registry_enabled:
-    type: boolean
-    description: >
-      Indicates whether the docker registry is enabled.
-    default: false
-
-  registry_port:
-    type: number
-    description: port of registry service
-    default: 5000
-
-  registry_username:
-    type: string
-    description: username used by docker registry
-    default: "username"
-
-  registry_password:
-    type: string
-    description: password used by docker registry
-    default: "password"
-
-  registry_domain:
-    type: string
-    description: domain used by docker registry
-    default: "domain"
-
-  registry_trust_id:
-    type: string
-    description: trust_id used by docker registry
-    default: "trust_id"
-
-  registry_auth_url:
-    type: string
-    description: auth_url for keystone
-    default: "auth_url"
-
-  registry_region:
-    type: string
-    description: region of swift service
-    default: "region"
-
-  registry_container:
-    type: string
-    description: >
-      name of swift container which docker registry stores images in
-    default: "container"
-
-  registry_insecure:
-    type: boolean
-    description: >
-      indicates whether to skip TLS verification between registry and backend storage
-    default: true
-
-  registry_chunksize:
-    type: number
-    description: >
-      size of the data segments for the swift dynamic large objects
-    default: 5242880
-
-  secgroup_kube_minion_id:
-    type: string
-    description: ID of the security group for kubernetes minion.
-
-  kube_minion_id:
-    type: string
-    description: ID of the kubernetes minion.
-
-  auth_url:
-    type: string
-    description: >
-      url for kubernetes to authenticate before sending request to neutron
-
-  trustee_domain_id:
-    type: string
-    description: domain id of the trustee
-
-  trustee_user_id:
-    type: string
-    description: user id of the trustee
-
-  trustee_username:
-    type: string
-    description: username of the trustee
-
-  trustee_password:
-    type: string
-    description: password of the trustee
-    hidden: true
-
-  trust_id:
-    type: string
-    description: id of the trust which is used by the trustee
-    hidden: true
-
-resources:
-
-  minion_wait_handle:
-    type: OS::Heat::WaitConditionHandle
-
-  minion_wait_condition:
-    type: OS::Heat::WaitCondition
-    depends_on: kube-minion
-    properties:
-      handle: {get_resource: minion_wait_handle}
-      timeout: {get_param: wait_condition_timeout}
-
-  ######################################################################
-  #
-  # software configs. these are components that are combined into
-  # a multipart MIME user-data archive.
-  #
-
-  write_heat_params:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config:
-        str_replace:
-          template: {get_file: fragments/write-heat-params-minion.yaml}
-          params:
-            "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv}
-            "$KUBE_MASTER_IP": {get_param: kube_master_ip}
-            "$KUBE_NODE_IP": {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]}
-            "$KUBE_API_PORT": {get_param: kubernetes_port}
-            "$ETCD_SERVER_IP": {get_param: etcd_server_ip}
-            "$DOCKER_VOLUME": {get_resource: docker_volume}
-            "$NETWORK_DRIVER": {get_param: network_driver}
-            "$REGISTRY_ENABLED": {get_param: registry_enabled}
-            "$REGISTRY_PORT": {get_param: registry_port}
-            "$REGISTRY_AUTH_URL": {get_param: registry_auth_url}
-            "$REGISTRY_REGION": {get_param: registry_region}
-            "$REGISTRY_USERNAME": {get_param: registry_username}
-            "$REGISTRY_PASSWORD": {get_param: registry_password}
-            "$REGISTRY_DOMAIN": {get_param: registry_domain}
-            "$REGISTRY_TRUST_ID": {get_param: registry_trust_id}
-            "$REGISTRY_CONTAINER": {get_param: registry_container}
-            "$REGISTRY_INSECURE": {get_param: registry_insecure}
-            "$REGISTRY_CHUNKSIZE": {get_param: registry_chunksize}
-            "$TLS_DISABLED": {get_param: tls_disabled}
-            "$KUBE_VERSION": {get_param: kube_version}
-            "$CLUSTER_UUID": {get_param: cluster_uuid}
-            "$MAGNUM_URL": {get_param: magnum_url}
-            "$HTTP_PROXY": {get_param: http_proxy}
-            "$HTTPS_PROXY": {get_param: https_proxy}
-            "$NO_PROXY": {get_param: no_proxy}
-            "$AUTH_URL": {get_param: auth_url}
-            "$TRUSTEE_DOMAIN_ID": {get_param: trustee_domain_id}
-            "$TRUSTEE_USER_ID": {get_param: trustee_user_id}
-            "$TRUSTEE_USERNAME": {get_param: trustee_username}
-            "$TRUSTEE_PASSWORD": {get_param: trustee_password}
-            "$TRUST_ID": {get_param: trust_id}
-
-  write_kubeconfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: fragments/write-kubeconfig.yaml}
-
-  make_cert:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: fragments/make-cert-client.sh}
-
-  configure_flanneld:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: fragments/configure-flanneld-minion.sh}
-
-  configure_docker:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: fragments/configure-docker.sh}
-
-  create_kubernetes_user:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: fragments/create-kubernetes-user.yaml}
-
-  configure_kubernetes:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: fragments/configure-kubernetes-minion.sh}
-
-  add_proxy:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: fragments/add-proxy.sh}
-
-  minion_wc_notify:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config:
-        str_replace:
-          template: |
-            #!/bin/bash -v
-            wc_notify --data-binary '{"status": "SUCCESS"}'
-          params:
-            wc_notify: {get_attr: [minion_wait_handle, curl_cli]}
-
-  kube_minion_init:
-    type: OS::Heat::MultipartMime
-    properties:
-      parts:
-        - config: {get_resource: write_heat_params}
-        - config: {get_resource: write_kubeconfig}
-        - config: {get_resource: make_cert}
-        - config: {get_resource: configure_flanneld}
-        - config: {get_resource: configure_docker}
-        - config: {get_resource: create_kubernetes_user}
-        - config: {get_resource: configure_kubernetes}
-        - config: {get_resource: add_proxy}
-        - config: {get_resource: minion_wc_notify}
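For orientation, the ``minion_wc_notify`` script above is the piece that releases ``minion_wait_condition``: ``wc_notify`` expands to a pre-signed curl command for the wait handle. A hedged Python equivalent of that final signal (the handle URL is a placeholder, not a real endpoint):

.. code-block:: python

    # Sketch: what the wc_notify curl command amounts to. Heat's
    # curl_cli embeds a pre-signed URL for the wait condition handle;
    # posting a SUCCESS status releases OS::Heat::WaitCondition.
    import json
    import urllib.request

    # Placeholder: in a real deployment this comes from
    # {get_attr: [minion_wait_handle, curl_cli]}.
    SIGNAL_URL = "http://heat.example.com:8004/v1/signal/arn%3A..."

    def notify_success(url):
        req = urllib.request.Request(
            url,
            data=json.dumps({"status": "SUCCESS"}).encode(),
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        with urllib.request.urlopen(req, timeout=10) as resp:
            return resp.status

    if __name__ == "__main__":
        print(notify_success(SIGNAL_URL))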
-
-  ######################################################################
-  #
-  # a single kubernetes minion.
-  # Important: the name for the heat resource kube-minion below must
-  # not contain "_" (underscore) because it will be used in the
-  # hostname. Because a DNS domain name does not allow "_", the "_"
-  # will be converted to a "-", which makes the hostname different
-  # from the Nova instance name. This in turn breaks the load
-  # balancer feature in Kubernetes.
-  #
-
-  kube-minion:
-    type: OS::Nova::Server
-    properties:
-      name: {get_param: kube_minion_id}
-      image: {get_param: server_image}
-      flavor: {get_param: minion_flavor}
-      key_name: {get_param: ssh_key_name}
-      user_data_format: RAW
-      user_data: {get_resource: kube_minion_init}
-      networks:
-        - port: {get_resource: kube_minion_eth0}
-
-  kube_minion_eth0:
-    type: OS::Neutron::Port
-    properties:
-      network: {get_param: fixed_network}
-      security_groups:
-        - get_param: secgroup_kube_minion_id
-      fixed_ips:
-        - subnet: {get_param: fixed_subnet}
-      allowed_address_pairs:
-        - ip_address: {get_param: flannel_network_cidr}
-      replacement_policy: AUTO
-
-  kube_minion_floating:
-    type: Magnum::Optional::KubeMinion::Neutron::FloatingIP
-    properties:
-      floating_network: {get_param: external_network}
-      port_id: {get_resource: kube_minion_eth0}
-
-  ######################################################################
-  #
-  # docker storage. This allocates a cinder volume and attaches it
-  # to the minion.
-  #
-
-  docker_volume:
-    type: OS::Cinder::Volume
-    properties:
-      size: {get_param: docker_volume_size}
-
-  docker_volume_attach:
-    type: OS::Cinder::VolumeAttachment
-    properties:
-      instance_uuid: {get_resource: kube-minion}
-      volume_id: {get_resource: docker_volume}
-      mountpoint: /dev/vdb
-
-outputs:
-
-  kube_minion_ip:
-    value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]}
-    description: >
-      This is the "private" IP address of the Kubernetes minion node.
-
-  kube_minion_external_ip:
-    value: {get_attr: [kube_minion_floating, floating_ip_address]}
-    description: >
-      This is the "public" IP address of the Kubernetes minion node.
-
-  OS::stack_id:
-    value: {get_param: "OS::stack_id"}
-    description: >
-      This is the ID of the stack created from this template.
diff --git a/contrib/drivers/k8s_opensuse_v1/version.py b/contrib/drivers/k8s_opensuse_v1/version.py
deleted file mode 100644
index acc670d9..00000000
--- a/contrib/drivers/k8s_opensuse_v1/version.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2016 - SUSE Linux GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version = '1.0.0'
-driver = 'k8s_opensuse_v1'
-container_version = '1.12.3'
diff --git a/contrib/templates/example/README.rst b/contrib/templates/example/README.rst
deleted file mode 100644
index b20dd67f..00000000
--- a/contrib/templates/example/README.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-========================
-Example Cluster Template
-========================
-
-The purpose of this example template is to demonstrate working with cluster
-templates using the magnum service.
-The Heat template used in this example (example.yaml) provisions a single
-server instance and does not produce a usable cluster.
-
-See ``_ for instructions.
diff --git a/contrib/templates/example/example_template/__init__.py b/contrib/templates/example/example_template/__init__.py
deleted file mode 100644
index 58a3d5d5..00000000
--- a/contrib/templates/example/example_template/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) 2015 Rackspace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-from magnum.drivers.common import template_def
-
-
-class ExampleTemplate(template_def.BaseTemplateDefinition):
-    provides = [
-        {'server_type': 'vm', 'os': 'example', 'coe': 'example_coe'},
-        {'server_type': 'vm', 'os': 'example2', 'coe': 'example_coe'},
-    ]
-
-    def __init__(self):
-        super(ExampleTemplate, self).__init__()
-
-        self.add_output('server_address',
-                        bay_attr='api_address')
-        self.add_output('node_addresses',
-                        bay_attr='node_addresses')
-
-    def template_path(self):
-        return os.path.join(os.path.dirname(__file__), 'example.yaml')
diff --git a/contrib/templates/example/example_template/example.yaml b/contrib/templates/example/example_template/example.yaml
deleted file mode 100644
index 91888a53..00000000
--- a/contrib/templates/example/example_template/example.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
-  This is just an example Heat template. It only provisions a single server
-  instance and does not produce a usable cluster.
-
-parameters:
-  #
-  # REQUIRED PARAMETERS
-  #
-  ssh_key_name:
-    type: string
-    description: name of ssh key to be provisioned on our server
-
-  #
-  # OPTIONAL PARAMETERS
-  #
-  server_image:
-    type: string
-    default: centos-atomic
-    description: glance image used to boot the server
-
-  server_flavor:
-    type: string
-    default: m1.small
-    description: flavor to use when booting the server
-
-resources:
-
-  example_server:
-    type: "OS::Nova::Server"
-    properties:
-      image:
-        get_param: server_image
-      flavor:
-        get_param: server_flavor
-      key_name:
-        get_param: ssh_key_name
-
-outputs:
-
-  server_address:
-    value: {get_attr: [example_server, accessIPv4]}
-
-  node_addresses:
-    value: []
diff --git a/contrib/templates/example/setup.py b/contrib/templates/example/setup.py
deleted file mode 100644
index f3c9122e..00000000
--- a/contrib/templates/example/setup.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2015 Rackspace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import setuptools
-
-setuptools.setup(
-    name="ExampleTemplate",
-    version="0.1",
-    packages=['example_template'],
-    install_requires=['magnum'],
-    package_data={
-        'example_template': ['example.yaml']
-    },
-    author="Me",
-    author_email="me@example.com",
-    description="This is an Example Template",
-    license="Apache",
-    keywords="magnum template example",
-    entry_points={
-        'magnum.template_definitions': [
-            'example_template = example_template:ExampleTemplate'
-        ]
-    }
-)
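The ``magnum.template_definitions`` entry point declared in this ``setup.py`` is how the template becomes discoverable at runtime. A sketch of that discovery using ``importlib.metadata`` (Python 3.10+); magnum's own loader may differ, so treat this only as an illustration of the plugin mechanism:

.. code-block:: python

    # Sketch: enumerate template definitions registered under the
    # 'magnum.template_definitions' entry point group. Requires the
    # example package (and magnum) to be installed.
    from importlib.metadata import entry_points

    def load_template_definitions():
        defs = {}
        for ep in entry_points(group='magnum.template_definitions'):
            defs[ep.name] = ep.load()()   # instantiate, e.g. ExampleTemplate()
        return defs

    for name, td in load_template_definitions().items():
        # `provides` is the list of (server_type, os, coe) dicts above.
        print(name, td.provides)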
diff --git a/devstack/README.rst b/devstack/README.rst
deleted file mode 100644
index 93ed300a..00000000
--- a/devstack/README.rst
+++ /dev/null
@@ -1,33 +0,0 @@
-====================
-DevStack Integration
-====================
-
-This directory contains the files necessary to integrate magnum with devstack.
-
-Refer to the quickstart guide at
-http://docs.openstack.org/developer/magnum/dev/quickstart.html
-for more information on using devstack and magnum.
-
-Running devstack with magnum for the first time may take a long time as it
-needs to download the Fedora Atomic qcow2 image (see
-http://www.projectatomic.io/download/).
-
-To install magnum into devstack, add the following settings to enable the
-magnum plugin::
-
-    cat > /opt/stack/devstack/local.conf << END
-    [[local|localrc]]
-    enable_plugin heat https://github.com/openstack/heat master
-    enable_plugin magnum https://github.com/openstack/magnum master
-    END
-
-Additionally, you might need additional Neutron configuration for
-your environment.
-Please refer to the devstack documentation [#devstack_neutron]_ for details.
-
-Then run devstack normally::
-
-    cd /opt/stack/devstack
-    ./stack.sh
-
-.. [#devstack_neutron] https://docs.openstack.org/developer/devstack/guides/neutron.html
diff --git a/devstack/lib/magnum b/devstack/lib/magnum
deleted file mode 100644
index 2a6e49cd..00000000
--- a/devstack/lib/magnum
+++ /dev/null
@@ -1,363 +0,0 @@
-#!/bin/bash
-#
-# lib/magnum
-# Functions to control the configuration and operation of the **magnum** service
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_magnum
-# - configure_magnum
-# - create_magnum_conf
-# - init_magnum
-# - magnum_register_image
-# - start_magnum
-# - configure_iptables_magnum
-# - stop_magnum
-# - cleanup_magnum
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-MAGNUM_REPO=${MAGNUM_REPO:-${GIT_BASE}/openstack/magnum.git}
-MAGNUM_BRANCH=${MAGNUM_BRANCH:-master}
-MAGNUM_DIR=$DEST/magnum
-
-GITREPO["python-magnumclient"]=${MAGNUMCLIENT_REPO:-${GIT_BASE}/openstack/python-magnumclient.git}
-GITBRANCH["python-magnumclient"]=${MAGNUMCLIENT_BRANCH:-master}
-GITDIR["python-magnumclient"]=$DEST/python-magnumclient
-
-MAGNUM_STATE_PATH=${MAGNUM_STATE_PATH:=$DATA_DIR/magnum}
-MAGNUM_AUTH_CACHE_DIR=${MAGNUM_AUTH_CACHE_DIR:-/var/cache/magnum}
-
-MAGNUM_CONF_DIR=/etc/magnum
-MAGNUM_CONF=$MAGNUM_CONF_DIR/magnum.conf
-MAGNUM_POLICY_JSON=$MAGNUM_CONF_DIR/policy.json
-MAGNUM_API_PASTE=$MAGNUM_CONF_DIR/api-paste.ini
-
-if is_ssl_enabled_service "magnum" || is_service_enabled tls-proxy; then
-    MAGNUM_SERVICE_PROTOCOL="https"
-fi
-
-# Public facing bits
-MAGNUM_SERVICE_HOST=${MAGNUM_SERVICE_HOST:-$HOST_IP}
-MAGNUM_SERVICE_PORT=${MAGNUM_SERVICE_PORT:-9511}
-MAGNUM_SERVICE_PORT_INT=${MAGNUM_SERVICE_PORT_INT:-19511}
-MAGNUM_SERVICE_PROTOCOL=${MAGNUM_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD=${MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD:-secret}
-
-MAGNUM_SWIFT_REGISTRY_CONTAINER=${MAGNUM_SWIFT_REGISTRY_CONTAINER:-docker_registry}
-
-# Support entry points installation of console scripts
-if [[ -d $MAGNUM_DIR/bin ]]; then
-    MAGNUM_BIN_DIR=$MAGNUM_DIR/bin
-else
-    MAGNUM_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-MAGNUM_CONFIGURE_IPTABLES=${MAGNUM_CONFIGURE_IPTABLES:-True}
-
-# Functions
-# ---------
-
-# Test if any magnum services are enabled
-# is_magnum_enabled
-function is_magnum_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"magnum-" ]] && return 0
-    return 1
-}
-
-# cleanup_magnum() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_magnum {
-    sudo rm -rf $MAGNUM_STATE_PATH $MAGNUM_AUTH_CACHE_DIR
-}
-
-# configure_magnum() - Set config files, create data dirs, etc
-function configure_magnum {
-    # Put config files in ``/etc/magnum`` for everyone to find
-    if [[ ! -d $MAGNUM_CONF_DIR ]]; then
-        sudo mkdir -p $MAGNUM_CONF_DIR
-        sudo chown $STACK_USER $MAGNUM_CONF_DIR
-    fi
-
-    install_default_policy magnum
-    # Rebuild the config file from scratch
-    create_magnum_conf
-
-    create_api_paste_conf
-}
-
-# create_magnum_accounts() - Set up common required magnum accounts
-#
-# Project              User       Roles
-# ------------------------------------------------------------------
-# SERVICE_PROJECT_NAME magnum     service
-function create_magnum_accounts {
-
-    create_service_user "magnum" "admin"
-
-    local magnum_service=$(get_or_create_service "magnum" \
-        "container-infra" "Container Infrastructure Management Service")
-    get_or_create_endpoint $magnum_service \
-        "$REGION_NAME" \
-        "$MAGNUM_SERVICE_PROTOCOL://$MAGNUM_SERVICE_HOST:$MAGNUM_SERVICE_PORT/v1" \
-        "$MAGNUM_SERVICE_PROTOCOL://$MAGNUM_SERVICE_HOST:$MAGNUM_SERVICE_PORT/v1" \
-        "$MAGNUM_SERVICE_PROTOCOL://$MAGNUM_SERVICE_HOST:$MAGNUM_SERVICE_PORT/v1"
-
-}
-
-# create_magnum_conf() - Create a new magnum.conf file
-function create_magnum_conf {
-
-    # (Re)create ``magnum.conf``
-    rm -f $MAGNUM_CONF
-    HOSTNAME=`hostname`
-    iniset $MAGNUM_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
-    iniset $MAGNUM_CONF DEFAULT transport_url \
-        "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST"
-    iniset $MAGNUM_CONF DEFAULT host "$HOSTNAME"
-
-    iniset $MAGNUM_CONF database connection `database_connection_url magnum`
-    iniset $MAGNUM_CONF api host "$MAGNUM_SERVICE_HOST"
-    iniset $MAGNUM_CONF api port "$MAGNUM_SERVICE_PORT"
-
-    iniset $MAGNUM_CONF oslo_policy policy_file $MAGNUM_POLICY_JSON
-
-    iniset $MAGNUM_CONF keystone_auth auth_type password
-    iniset $MAGNUM_CONF keystone_auth username magnum
-    iniset $MAGNUM_CONF keystone_auth password $SERVICE_PASSWORD
-    iniset $MAGNUM_CONF keystone_auth project_name $SERVICE_PROJECT_NAME
-    iniset $MAGNUM_CONF keystone_auth project_domain_id default
-    iniset $MAGNUM_CONF keystone_auth user_domain_id default
-
-    # FIXME(pauloewerton): keystone_authtoken section is deprecated. Remove it
-    # after deprecation period.
-    iniset $MAGNUM_CONF keystone_authtoken admin_user magnum
-    iniset $MAGNUM_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $MAGNUM_CONF keystone_authtoken admin_tenant_name $SERVICE_PROJECT_NAME
-
-    configure_auth_token_middleware $MAGNUM_CONF magnum $MAGNUM_AUTH_CACHE_DIR
-
-    iniset $MAGNUM_CONF keystone_auth auth_url $KEYSTONE_AUTH_URI_V3
-    iniset $MAGNUM_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI_V3
-    iniset $MAGNUM_CONF keystone_authtoken auth_url $KEYSTONE_AUTH_URI_V3
-    iniset $MAGNUM_CONF keystone_authtoken auth_version v3
-
-    if is_fedora || is_suse; then
-        # magnum defaults to /usr/local/bin, but fedora and suse pip like to
-        # install things in /usr/bin
-        iniset $MAGNUM_CONF DEFAULT bindir "/usr/bin"
-    fi
-
-    if [ -n "$MAGNUM_STATE_PATH" ]; then
-        iniset $MAGNUM_CONF DEFAULT state_path "$MAGNUM_STATE_PATH"
-        iniset $MAGNUM_CONF oslo_concurrency lock_path "$MAGNUM_STATE_PATH"
-    fi
-
-    if [ "$SYSLOG" != "False" ]; then
-        iniset $MAGNUM_CONF DEFAULT use_syslog "True"
-    fi
-
-    # Format logging
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
-        setup_colorized_logging $MAGNUM_CONF DEFAULT
-    else
-        # Show user_name and project_name instead of user_id and project_id
-        iniset $MAGNUM_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
-    fi
-
-    # Register SSL certificates if provided
-    if is_ssl_enabled_service magnum; then
-        ensure_certificates MAGNUM
-
-        iniset $MAGNUM_CONF DEFAULT ssl_cert_file "$MAGNUM_SSL_CERT"
-        iniset $MAGNUM_CONF DEFAULT ssl_key_file "$MAGNUM_SSL_KEY"
-
-        iniset $MAGNUM_CONF DEFAULT enabled_ssl_apis "$MAGNUM_ENABLED_APIS"
-    fi
-
-    if is_service_enabled ceilometer; then
-        iniset $MAGNUM_CONF oslo_messaging_notifications driver "messaging"
-    fi
-
-    if is_service_enabled barbican; then
-        iniset $MAGNUM_CONF certificates cert_manager_type "barbican"
-    else
-        iniset $MAGNUM_CONF certificates cert_manager_type "x509keypair"
-    fi
-
-    trustee_domain_id=$(get_or_create_domain magnum 'Owns users and projects created by magnum')
-    trustee_domain_admin_id=$(get_or_create_user trustee_domain_admin $MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD $trustee_domain_id)
-    openstack --os-auth-url $KEYSTONE_SERVICE_URI_V3 \
-              --os-identity-api-version 3 role add \
-              --user $trustee_domain_admin_id --domain $trustee_domain_id \
-              admin
-    iniset $MAGNUM_CONF trust cluster_user_trust True
-    iniset $MAGNUM_CONF trust trustee_domain_name magnum
-    iniset $MAGNUM_CONF trust trustee_domain_admin_name trustee_domain_admin
-    iniset $MAGNUM_CONF trust trustee_domain_admin_password $MAGNUM_TRUSTEE_DOMAIN_ADMIN_PASSWORD
-    iniset $MAGNUM_CONF trust trustee_keystone_interface public
-    iniset $MAGNUM_CONF cinder_client region_name $REGION_NAME
-
-    if is_service_enabled swift; then
-        iniset $MAGNUM_CONF docker_registry swift_region $REGION_NAME
-        iniset $MAGNUM_CONF docker_registry swift_registry_container $MAGNUM_SWIFT_REGISTRY_CONTAINER
-    fi
-
-    # Get the default volume type from cinder.conf and set the corresponding
-    # default in magnum.conf
-    default_volume_type=$(iniget /etc/cinder/cinder.conf DEFAULT default_volume_type)
-    iniset $MAGNUM_CONF cinder default_docker_volume_type $default_volume_type
-}
-
-function create_api_paste_conf {
-    # copy api_paste.ini
-    cp $MAGNUM_DIR/etc/magnum/api-paste.ini $MAGNUM_API_PASTE
-}
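For readers who don't know devstack internals, each ``iniset`` call above writes one ``section/key = value`` entry into ``magnum.conf``. A rough Python approximation with ``configparser`` (the real ``iniset`` is sed-based and preserves file layout; the path and values here are illustrative):

.. code-block:: python

    # Sketch: approximate devstack's `iniset <file> <section> <key> <value>`.
    # This rewrites the whole file rather than editing in place.
    import configparser

    def iniset(path, section, key, value):
        conf = configparser.ConfigParser()
        conf.read(path)                      # tolerates a missing file
        if section != "DEFAULT" and not conf.has_section(section):
            conf.add_section(section)
        conf.set(section, key, value)
        with open(path, "w") as f:
            conf.write(f)

    # Illustrative values, mirroring create_magnum_conf above.
    iniset("magnum.conf", "api", "port", "9511")
    iniset("magnum.conf", "keystone_auth", "auth_type", "password")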
-
-# create_magnum_cache_dir() - Part of the init_magnum() process
-function create_magnum_cache_dir {
-    # Create cache dir
-    sudo mkdir -p $MAGNUM_AUTH_CACHE_DIR
-    sudo chown $STACK_USER $MAGNUM_AUTH_CACHE_DIR
-    rm -f $MAGNUM_AUTH_CACHE_DIR/*
-}
-
-
-# init_magnum() - Initialize databases, etc.
-function init_magnum {
-    # Only do this step once on the API node for an entire cluster.
-    if is_service_enabled $DATABASE_BACKENDS && is_service_enabled magnum-api; then
-        # (Re)create magnum database
-        recreate_database magnum
-
-        # Migrate magnum database
-        $MAGNUM_BIN_DIR/magnum-db-manage upgrade
-    fi
-    create_magnum_cache_dir
-}
-
-# magnum_register_image - Register the guest image for magnum with the os_distro property
-function magnum_register_image {
-    local magnum_image_property="--property os_distro="
-
-    local atomic="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -io 'atomic' || true;)"
-    if [ ! -z "$atomic" ]; then
-        magnum_image_property=$magnum_image_property"fedora-atomic"
-    fi
-    local ubuntu="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -io "ubuntu" || true;)"
-    if [ ! -z "$ubuntu" ]; then
-        magnum_image_property=$magnum_image_property"ubuntu"
-    fi
-    local coreos="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -io "coreos" || true;)"
-    if [ ! -z "$coreos" ]; then
-        magnum_image_property=$magnum_image_property"coreos"
-    fi
-    # os_distro property for fedora ironic image
-    local fedora_ironic="$(echo $MAGNUM_GUEST_IMAGE_URL | grep -i "ironic" \
-        | grep -io "fedora" || true;)"
-    if [ ! -z "$fedora_ironic" ]; then
-        magnum_image_property=$magnum_image_property"fedora"
-    fi
-
-    # get the image name
-    local image_filename=$(basename "$MAGNUM_GUEST_IMAGE_URL")
-    local image_name=""
-    for extension in "tgz" "img" "qcow2" "iso" "vhd" "vhdx" "tar.gz" "img.gz" "img.bz2" "vhd.gz" "vhdx.gz"
-    do
-        if [ $(expr match "${image_filename}" ".*\.${extension}$") -ne 0 ]; then
-            image_name=$(basename "$image_filename" ".${extension}")
-            break
-        fi
-    done
-    if [ -z "${image_name}" ]; then
-        echo "Unknown image extension in $image_filename, supported extensions: tgz, img, qcow2, iso, vhd, vhdx, tar.gz, img.gz, img.bz2, vhd.gz, vhdx.gz"; false
-    fi
-
-    openstack --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT --os-image-api-version 2 image set $image_name $magnum_image_property
-}
-
-# install_magnumclient() - Collect source and prepare
-function install_magnumclient {
-    if use_library_from_git "python-magnumclient"; then
-        git_clone_by_name "python-magnumclient"
-        setup_dev_lib "python-magnumclient"
-        sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-magnumclient"]}/tools/,/etc/bash_completion.d/}magnum.bash_completion
-    fi
-}
-
-# install_magnum() - Collect source and prepare
-function install_magnum {
-    git_clone $MAGNUM_REPO $MAGNUM_DIR $MAGNUM_BRANCH
-    setup_develop $MAGNUM_DIR
-}
-
-# start_magnum_api() - Start the API process ahead of other things
-function start_magnum_api {
-    # Get right service port for testing
-    local service_port=$MAGNUM_SERVICE_PORT
-    local service_protocol=$MAGNUM_SERVICE_PROTOCOL
-    if is_service_enabled tls-proxy; then
-        service_port=$MAGNUM_SERVICE_PORT_INT
-        service_protocol="http"
-    fi
-
-    run_process magnum-api "$MAGNUM_BIN_DIR/magnum-api"
-    echo "Waiting for magnum-api to start..."
-    if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$MAGNUM_SERVICE_HOST:$service_port; then
-        die $LINENO "magnum-api did not start"
-    fi
-
-    # Start proxies if enabled
-    if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $MAGNUM_SERVICE_PORT $MAGNUM_SERVICE_HOST $MAGNUM_SERVICE_PORT_INT &
-        start_tls_proxy '*' $EC2_SERVICE_PORT $MAGNUM_SERVICE_HOST $EC2_SERVICE_PORT_INT &
-    fi
-}
-
-
-# configure_iptables_magnum() - Configure the IP table rules for Magnum
-function configure_iptables_magnum {
-    if [ "$MAGNUM_CONFIGURE_IPTABLES" != "False" ]; then
-        ROUTE_TO_INTERNET=$(ip route get 8.8.8.8)
-        OBOUND_DEV=$(echo ${ROUTE_TO_INTERNET#*dev} | awk '{print $1}')
-        sudo iptables -t nat -A POSTROUTING -o $OBOUND_DEV -j MASQUERADE
-        # bay nodes will access magnum-api (port $MAGNUM_SERVICE_PORT) to get CA certificate.
-        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $MAGNUM_SERVICE_PORT -j ACCEPT || true
-        # allow access to keystone etc (http and https)
-        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport 80 -j ACCEPT || true
-        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport 443 -j ACCEPT || true
-    fi
-}
-
-
-# start_magnum() - Start running processes, including screen
-function start_magnum {
-
-    # ``run_process`` checks ``is_service_enabled``, it is not needed here
-    start_magnum_api
-    run_process magnum-cond "$MAGNUM_BIN_DIR/magnum-conductor"
-}
-
-# stop_magnum() - Stop running processes (non-screen)
-function stop_magnum {
-    for serv in magnum-api magnum-cond; do
-        stop_process $serv
-    done
-}
-
-
-# Restore xtrace
-$XTRACE
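``wait_for_service`` above simply polls the API endpoint until it answers or the timeout expires. A hedged standard-library sketch of that loop (URL and timeouts are illustrative; any HTTP response counts as "up"):

.. code-block:: python

    # Sketch: poll an HTTP endpoint until it responds, roughly what
    # devstack's wait_for_service does for magnum-api.
    import time
    import urllib.error
    import urllib.request

    def wait_for_service(url, timeout=60, interval=1):
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            try:
                with urllib.request.urlopen(url, timeout=5):
                    return True
            except urllib.error.HTTPError:
                return True          # got an HTTP response: service is up
            except (urllib.error.URLError, OSError):
                time.sleep(interval)
        return False

    if __name__ == "__main__":
        # Illustrative endpoint; magnum-api listens on 9511 by default.
        print(wait_for_service("http://127.0.0.1:9511/"))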
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
deleted file mode 100755
index 81342bfd..00000000
--- a/devstack/plugin.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-# magnum.sh - Devstack extras script to install magnum
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set -o xtrace
-
-echo_summary "magnum's plugin.sh was called..."
-source $DEST/magnum/devstack/lib/magnum
-(set -o posix; set)
-
-if is_service_enabled magnum-api magnum-cond; then
-    if [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing magnum"
-        install_magnum
-
-        MAGNUM_GUEST_IMAGE_URL=${MAGNUM_GUEST_IMAGE_URL:-"https://fedorapeople.org/groups/magnum/fedora-atomic-latest.qcow2"}
-        IMAGE_URLS+=",${MAGNUM_GUEST_IMAGE_URL}"
-
-        LIBS_FROM_GIT="${LIBS_FROM_GIT},python-magnumclient"
-
-        install_magnumclient
-        cleanup_magnum
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring magnum"
-        configure_magnum
-
-        # Hack a large timeout for now
-        iniset /etc/keystone/keystone.conf token expiration 7200
-
-        if is_service_enabled key; then
-            create_magnum_accounts
-        fi
-
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        # Initialize magnum
-        init_magnum
-        magnum_register_image
-
-        # Start the magnum API and magnum conductor components
-        echo_summary "Starting magnum"
-        start_magnum
-
-        configure_iptables_magnum
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        stop_magnum
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        cleanup_magnum
-    fi
-fi
-
-# Restore xtrace
-$XTRACE
diff --git a/devstack/settings b/devstack/settings
deleted file mode 100644
index cb217e76..00000000
--- a/devstack/settings
+++ /dev/null
@@ -1,29 +0,0 @@
-# Devstack settings
-
-## Modify to your environment
-# FLOATING_RANGE=192.168.1.224/27
-# PUBLIC_NETWORK_GATEWAY=192.168.1.225
-# PUBLIC_INTERFACE=em1
-# FIXED_RANGE=10.0.0.0/24
-## Log all output to files
-# LOGFILE=$HOME/devstack.log
-# SCREEN_LOGDIR=$HOME/logs
-## Neutron settings
-# Q_USE_SECGROUP=True
-# ENABLE_TENANT_VLANS=True
-# TENANT_VLAN_RANGE=
-# PHYSICAL_NETWORK=public
-# OVS_PHYSICAL_BRIDGE=br-ex
-
-# Enable Neutron, which is required by Magnum, and disable nova-network.
-disable_service n-net
-enable_service q-svc
-enable_service q-agt
-enable_service q-dhcp
-enable_service q-l3
-enable_service q-meta
-enable_service neutron
-
-# Enable Magnum services
-enable_service magnum-api
-enable_service magnum-cond
diff --git a/doc/examples/etc/init/magnum-api.conf b/doc/examples/etc/init/magnum-api.conf
deleted file mode 100644
index 6f705926..00000000
--- a/doc/examples/etc/init/magnum-api.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-description "Magnum API server"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-respawn
-
-exec start-stop-daemon --start --chuid magnum \
---chdir /var/lib/magnum \
---name magnum-api \
---exec /var/lib/magnum/env/bin/magnum-api -- \
---config-file=/etc/magnum/magnum.conf \
---log-file=/var/log/magnum/magnum-api.log
diff --git a/doc/examples/etc/init/magnum-conductor.conf b/doc/examples/etc/init/magnum-conductor.conf
deleted file mode 100644
index 3454d2d3..00000000
--- a/doc/examples/etc/init/magnum-conductor.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-description "Magnum conductor"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-respawn
-
-exec start-stop-daemon --start --chuid magnum \
---chdir /var/lib/magnum \
---name magnum-conductor \
---exec /var/lib/magnum/env/bin/magnum-conductor -- \
---config-file=/etc/magnum/magnum.conf \
---log-file=/var/log/magnum/magnum-conductor.log
diff --git a/doc/examples/etc/logrotate.d/magnum.logrotate b/doc/examples/etc/logrotate.d/magnum.logrotate
deleted file mode 100644
index ceca98fe..00000000
--- a/doc/examples/etc/logrotate.d/magnum.logrotate
+++ /dev/null
@@ -1,7 +0,0 @@
-/var/log/magnum/*.log {
-    rotate 14
-    size 10M
-    missingok
-    compress
-    copytruncate
-}
diff --git a/doc/examples/etc/systemd/system/magnum-api.service b/doc/examples/etc/systemd/system/magnum-api.service
deleted file mode 100644
index 2f91a73d..00000000
--- a/doc/examples/etc/systemd/system/magnum-api.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack Magnum API Service
-After=syslog.target network.target
-
-[Service]
-Type=simple
-User=magnum
-ExecStart=/var/lib/magnum/env/bin/magnum-api
-PrivateTmp=true
-NotifyAccess=all
-KillMode=process
-Restart=on-failure
-
-[Install]
-WantedBy=multi-user.target
diff --git a/doc/examples/etc/systemd/system/magnum-conductor.service b/doc/examples/etc/systemd/system/magnum-conductor.service
deleted file mode 100644
index 9864a547..00000000
--- a/doc/examples/etc/systemd/system/magnum-conductor.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=OpenStack Magnum Conductor Service
-After=syslog.target network.target qpidd.service mysqld.service tgtd.service
-
-[Service]
-Type=simple
-User=magnum
-ExecStart=/var/lib/magnum/env/bin/magnum-conductor
-PrivateTmp=true
-NotifyAccess=all
-KillMode=process
-Restart=on-failure
-
-[Install]
-WantedBy=multi-user.target
diff --git a/doc/source/admin/configuring.rst b/doc/source/admin/configuring.rst
deleted file mode 100644
index c0aabac1..00000000
--- a/doc/source/admin/configuring.rst
+++ /dev/null
@@ -1,58 +0,0 @@
-..
-      Copyright 2016 Hewlett Packard Enterprise Development Company LP
-      All Rights Reserved.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-Configuration
-=============
-
-Magnum has a number of configuration options, which are detailed here.
-
-Magnum Config
--------------
-
-The magnum configuration file is called ``magnum.conf``.
-
-Magnum Pipeline
----------------
-
-The pipeline details are contained in ``api-paste.ini``.
-
-Healthcheck Middleware
-~~~~~~~~~~~~~~~~~~~~~~
-
-This piece of middleware creates an endpoint that allows a load balancer
-to probe if the API endpoint should be available at the node or not.
-
-The healthcheck middleware should be placed early in the pipeline. It is
-configured in your ``api-paste.ini`` in a section called
-``[filter:healthcheck]``, which should look like this::
-
-    [filter:healthcheck]
-    paste.filter_factory = oslo_middleware:Healthcheck.factory
-    backends = disable_by_file
-    disable_by_file_path = /etc/magnum/healthcheck_disable
-
-The main pipeline using this filter, also defined in ``api-paste.ini``,
-should look something like this::
-
-    [pipeline:main]
-    pipeline = cors healthcheck request_id authtoken api_v1
-
-If you wish to disable the middleware without taking it out of the
-pipeline, create the file referenced by ``disable_by_file_path``,
-i.e. ``/etc/magnum/healthcheck_disable``.
-
-For more information see
-`oslo.middleware `_.
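To check the middleware from the outside, probe the endpoint directly: it should return 200 while healthy and 503 once the disable file exists. A sketch, assuming the default ``/healthcheck`` path and magnum-api's default port 9511:

.. code-block:: python

    # Sketch: probe the healthcheck endpoint exposed by the middleware.
    # 200 means "keep in the load balancer pool"; 503 means drained
    # (e.g. /etc/magnum/healthcheck_disable exists).
    import urllib.error
    import urllib.request

    def is_healthy(base="http://127.0.0.1:9511"):
        try:
            with urllib.request.urlopen(base + "/healthcheck", timeout=5) as r:
                return r.status == 200
        except urllib.error.HTTPError:
            return False   # e.g. 503 once the disable file exists
        except OSError:
            return False   # API not reachable at all

    print(is_healthy())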
diff --git a/doc/source/admin/gmr.rst b/doc/source/admin/gmr.rst
deleted file mode 100644
index a4513ef3..00000000
--- a/doc/source/admin/gmr.rst
+++ /dev/null
@@ -1,88 +0,0 @@
-..
-      Copyright (c) 2014 OpenStack Foundation
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-Guru Meditation Reports
-=======================
-
-Magnum contains a mechanism whereby developers and system administrators can
-generate a report about the state of a running Magnum executable. This report
-is called a *Guru Meditation Report* (*GMR* for short).
-
-Generating a GMR
-----------------
-
-A *GMR* can be generated by sending the *USR2* signal to any Magnum process
-with support (see below). The *GMR* will then be output to standard error
-for that particular process.
-
-For example, suppose that ``magnum-api`` has process id ``8675``, and was run
-with ``2>/var/log/magnum/magnum-api-err.log``. Then, ``kill -USR2 8675`` will
-trigger the Guru Meditation report to be printed to
-``/var/log/magnum/magnum-api-err.log``.
-
-Structure of a GMR
-------------------
-
-The *GMR* is designed to be extensible; any particular executable may add its
-own sections. However, the base *GMR* consists of several sections:
-
-Package
-  Shows information about the package to which this process belongs, including
-  version information.
-
-Threads
-  Shows stack traces and thread ids for each of the threads within this
-  process.
-
-Green Threads
-  Shows stack traces for each of the green threads within this process (green
-  threads don't have thread ids).
-
-Configuration
-  Lists all the configuration options currently accessible via the CONF object
-  for the current process.
-
-Adding Support for GMRs to New Executables
-------------------------------------------
-
-Adding support for a *GMR* to a given executable is fairly easy.
-
-First import the module:
-
-.. code-block:: python
-
-   from oslo_reports import guru_meditation_report as gmr
-   from magnum import version
-
-Then, register any additional sections (optional):
-
-.. code-block:: python
-
-   gmr.TextGuruMeditation.register_section('Some Special Section',
-                                           some_section_generator)
-
-Finally (under main), before running the "main loop" of the executable (usually
-``service.server(server)`` or something similar), register the *GMR* hook:
-
-.. code-block:: python
-
-   gmr.TextGuruMeditation.setup_autorun(version)
-
-Extending the GMR
------------------
-
-As mentioned above, additional sections can be added to the GMR for a
-particular executable. For more information, see the inline documentation
-under :mod:`oslo.reports`
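A small helper can replace the manual ``kill -USR2`` shown above; this sketch only wraps the same signal delivery (the pid is the illustrative one from the text):

.. code-block:: python

    # Sketch: trigger a GMR from a helper script instead of `kill -USR2`.
    # The report lands on the target process's stderr, as described above.
    import os
    import signal

    def trigger_gmr(pid):
        # Equivalent to `kill -USR2 <pid>`.
        os.kill(pid, signal.SIGUSR2)

    trigger_gmr(8675)  # example magnum-api pid from the text above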
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
deleted file mode 100644
index b0b99c5c..00000000
--- a/doc/source/admin/index.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-Administrator's Guide
-=====================
-
-Installation & Operations
--------------------------
-
-If you are a system administrator running Magnum, this section contains
-information that should help you understand how to deploy, operate, and upgrade
-the services.
-
-.. toctree::
-   :maxdepth: 1
-
-   Magnum Proxy
-   gmr
-   Troubleshooting FAQ
-
-Configuration
--------------
-
-The following pages will be helpful in configuring specific aspects
-of Magnum that may or may not be suitable to every situation.
-
-.. toctree::
-   :maxdepth: 1
-
-   configuring
diff --git a/doc/source/admin/magnum-proxy.rst b/doc/source/admin/magnum-proxy.rst
deleted file mode 100644
index 7faa6281..00000000
--- a/doc/source/admin/magnum-proxy.rst
+++ /dev/null
@@ -1,72 +0,0 @@
-=====================================================
-Using Proxies in magnum if running behind a firewall
-=====================================================
-
-If you are running magnum behind a firewall, you may need a proxy
-for using services like docker, kubernetes and mesos. Use these steps
-when your firewall will not allow you to use those services without a
-proxy.
-
-**NOTE:** This feature has only been tested with the supported cluster type
-and associated image: Kubernetes and Swarm use the Fedora Atomic
-image, and Mesos uses the Ubuntu image.
-
-Proxy Parameters to define before use
-=====================================
-
-1. http-proxy
-
-Address of a proxy that will receive all HTTP requests and relay
-them. The format is a URL including a port number. For example:
-http://10.11.12.13:8000 or http://abcproxy.com:8000
-
-2. https-proxy
-
-Address of a proxy that will receive all HTTPS requests and relay
-them. The format is a URL including a port number. For example:
-https://10.11.12.13:8000 or https://abcproxy.com:8000
-
-3. no-proxy
-
-A comma separated list of IP addresses or hostnames that should bypass
-your proxy, and make connections directly.
-
-**NOTE:** Networks and subnets (for example, 192.168.0.0/28) are not
-accepted; only individual hostnames and IP addresses may be listed.
-
-Steps to configure proxies
-==============================
-
-You can specify all three proxy parameters while creating a ClusterTemplate
-of any COE type. All of the proxy parameters are optional. For example::
-
-    magnum cluster-template-create k8s-cluster-template \
-        --image fedora-atomic-latest \
-        --keypair testkey \
-        --external-network public \
-        --dns-nameserver 8.8.8.8 \
-        --flavor m1.small \
-        --coe kubernetes \
-        --http-proxy  \
-        --https-proxy  \
-        --no-proxy <172.24.4.4,172.24.4.9,172.24.4.8>
-    magnum cluster-template-create swarm-cluster-template \
-        --image fedora-atomic-latest \
-        --keypair testkey \
-        --external-network public \
-        --dns-nameserver 8.8.8.8 \
-        --flavor m1.small \
-        --coe swarm \
-        --http-proxy  \
-        --https-proxy  \
-        --no-proxy <172.24.4.4,172.24.4.9,172.24.4.8>
-    magnum cluster-template-create mesos-cluster-template \
-        --image ubuntu-mesos \
-        --keypair testkey \
-        --external-network public \
-        --dns-nameserver 8.8.8.8 \
-        --flavor m1.small \
-        --coe mesos \
-        --http-proxy  \
-        --https-proxy  \
-        --no-proxy <172.24.4.4,172.24.4.9,172.24.4.8>
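All three values are ultimately written into the cluster nodes as ``HTTP_PROXY``/``HTTPS_PROXY``/``NO_PROXY`` settings. A simplified sketch of how a no-proxy list is typically honored (addresses are the illustrative ones above; real docker/kubernetes parsing handles more cases):

.. code-block:: python

    # Sketch: the usual NO_PROXY decision made by clients inside the
    # cluster. Hosts on the bypass list connect directly; everything
    # else goes through the configured proxy.
    NO_PROXY = "172.24.4.4,172.24.4.9,172.24.4.8"
    HTTP_PROXY = "http://10.11.12.13:8000"

    def proxy_for(host):
        bypass = {h.strip() for h in NO_PROXY.split(",")}
        return None if host in bypass else HTTP_PROXY

    print(proxy_for("172.24.4.4"))   # None -> direct connection
    print(proxy_for("example.com"))  # proxied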
diff --git a/doc/source/admin/troubleshooting-guide.rst b/doc/source/admin/troubleshooting-guide.rst
deleted file mode 100644
index e433655e..00000000
--- a/doc/source/admin/troubleshooting-guide.rst
+++ /dev/null
@@ -1,804 +0,0 @@
-============================
-Magnum Troubleshooting Guide
-============================
-
-This guide is intended for users who use Magnum to deploy and manage
-clusters of hosts for a Container Orchestration Engine. It describes
-common failure conditions and techniques for troubleshooting. To help
-users quickly identify the relevant information, the guide is
-organized as a list of failure symptoms: each has some suggestions
-with pointers to the details for troubleshooting.
-
-A separate section `for developers`_ describes useful techniques such as
-debugging unit tests and gate tests.
-
-Failure symptoms
-================
-
-My cluster-create takes a really long time
-  If you are using devstack on a small VM, cluster-create will take a long
-  time and may eventually fail because of insufficient resources.
-  Another possible reason is that a process on one of the nodes is hung
-  and heat is still waiting on the signal. In this case, it will eventually
-  fail with a timeout, but since heat has a long default timeout, you can
-  look at the `heat stacks`_ and check the WaitConditionHandle resources.
-
-My cluster-create fails with error: "Failed to create trustee XXX in domain XXX"
-  Check the `trustee for cluster`_
-
-Kubernetes cluster-create fails
-  Check the `heat stacks`_, log into the master nodes and check the
-  `Kubernetes services`_ and `etcd service`_.
-
-Swarm cluster-create fails
-  Check the `heat stacks`_, log into the master nodes and check the `Swarm
-  services`_ and `etcd service`_.
-
-Mesos cluster-create fails
-  Check the `heat stacks`_, log into the master nodes and check the `Mesos
-  services`_.
-
-I get the error "Timed out waiting for a reply" when deploying a pod
-  Verify the `Kubernetes services`_ and `etcd service`_ are running on the
-  master nodes.
-
-I deploy pods on a Kubernetes cluster but the status stays "Pending"
-  The pod status is "Pending" while the Docker image is being downloaded,
-  so if the status does not change for a long time, log into the minion
-  node and check for `Cluster internet access`_.
-
-I deploy pods and services on a Kubernetes cluster but the app is not working
-  The pods and services are running and the status looks correct, but
-  if the app is performing communication between pods through services,
-  verify `Kubernetes networking`_.
-
-Swarm cluster is created successfully but I cannot deploy containers
-  Check the `Swarm services`_ and `etcd service`_ on the master nodes.
-
-Mesos cluster is created successfully but I cannot deploy containers on Marathon
-  Check the `Mesos services`_ on the master node.
-
-I get a "Protocol violation" error when deploying a container
-  For Kubernetes, check the `Kubernetes services`_ to verify that
-  kube-apiserver is running to accept the request.
-  Check `TLS`_ and `Barbican service`_.
-
-My cluster-create fails with a resource error on docker_volume
-  Check for available volume space on Cinder and the `request volume
-  size`_ in the heat template.
-  Run "nova volume-list" to check the volume status.
-
-
-Troubleshooting details
-=======================
-
-Heat stacks
------------
-
-A cluster is deployed by a set of heat stacks: one top level stack and several
-nested stacks. The stack names are prefixed with the cluster name and the
-nested stack names contain descriptive internal names like *kube_masters*,
-*kube_minions*.
-
-To list the status of all the stacks for a cluster::
-
-    heat stack-list -n | grep *cluster-name*
-
-If the cluster has failed, then one or more of the heat stacks would have
-failed. From the stack list above, look for the stacks that failed, then
-look for the particular resource(s) that failed in the failed stack by::
-
-    heat resource-list *failed-stack-name* | grep "FAILED"
-
-The resource_type of the failed resource should point to the OpenStack
-service, e.g. OS::Cinder::Volume. Check for more details on the failure by::
-
-    heat resource-show *failed-stack-name* *failed-resource-name*
-
-The resource_status_reason may give an indication on the failure, although
-in some cases it may only say "Unknown".
-
-If the failed resource is OS::Heat::WaitConditionHandle, this indicates that
-one of the services that are being started on the node is hung. Log into the
-node where the failure occurred and check the respective `Kubernetes
-services`_, `Swarm services`_ or `Mesos services`_. If the failure is in
-other scripts, look for them as `Heat software resource scripts`_.
-
-
-Trustee for cluster
--------------------
-When a user creates a cluster, Magnum will dynamically create a service account
-for the cluster. The service account will be used by the cluster to
-access the OpenStack services (e.g. Neutron, Swift, etc.). A trust relationship
-will be created between the user who created the cluster (the "trustor") and
-the service account created for the cluster (the "trustee"). For details,
-please refer to `_.
-
-If Magnum fails to create the trustee, check the magnum config file (usually
-in /etc/magnum/magnum.conf). Make sure 'trustee_*' and 'auth_uri' are set and
-their values are correct::
-
-    [keystone_authtoken]
-    auth_uri = http://controller:5000/v3
-    ...
-
-    [trust]
-    trustee_domain_admin_password = XXX
-    trustee_domain_admin_id = XXX
-    trustee_domain_id = XXX
-
-If the 'trust' group is missing, you might need to create the trustee domain
-and the domain admin:
-
-.. code-block:: bash
-
-    source /opt/stack/devstack/accrc/admin/admin
-    export OS_IDENTITY_API_VERSION=3
-    unset OS_AUTH_TYPE
-    openstack domain create magnum
-    openstack user create trustee_domain_admin --password=secret \
-                          --domain=magnum
-    openstack role add --user=trustee_domain_admin --user-domain magnum --domain=magnum admin
-
-    source /opt/stack/devstack/functions
-    export MAGNUM_CONF=/etc/magnum/magnum.conf
-    iniset $MAGNUM_CONF trust trustee_domain_id \
-        $(openstack domain show magnum | awk '/ id /{print $4}')
-    iniset $MAGNUM_CONF trust trustee_domain_admin_id \
-        $(openstack user show trustee_domain_admin | awk '/ id /{print $4}')
-    iniset $MAGNUM_CONF trust trustee_domain_admin_password secret
-
-Then, restart magnum-api and magnum-cond to pick up the new configuration.
-If the problem still exists, you might want to manually verify your domain
-admin credential to ensure it has the right privilege. To do that, run the
-script below with the credentials replaced (you must use the IDs where
-specified). If it fails, that means the credential you provided is invalid.
-
-.. code-block:: python
-
-    from keystoneauth1.identity import v3 as ka_v3
-    from keystoneauth1 import session as ka_session
-    from keystoneclient.v3 import client as kc_v3
-
-    auth = ka_v3.Password(
-        auth_url=YOUR_AUTH_URI,
-        user_id=YOUR_TRUSTEE_DOMAIN_ADMIN_ID,
-        domain_id=YOUR_TRUSTEE_DOMAIN_ID,
-        password=YOUR_TRUSTEE_DOMAIN_ADMIN_PASSWORD)
-
-    session = ka_session.Session(auth=auth)
-    domain_admin_client = kc_v3.Client(session=session)
-    user = domain_admin_client.users.create(
-        name='anyname',
-        password='anypass')
-
-
-TLS
----
-*To be filled in*
-
-
-Barbican service
-----------------
-*To be filled in*
-
-
-Cluster internet access
------------------------
-The nodes for Kubernetes, Swarm and Mesos are connected to a private
-Neutron network, so to provide access to the external internet, a router
-connects the private network to a public network. With devstack, the
-default public network is "public", but this can be replaced by the
-parameter "external-network" in the ClusterTemplate. The "public" network
-with devstack is actually not a real external network, so it is in turn
-routed to the network interface of the host for devstack. This is
-configured in the file local.conf with the variable PUBLIC_INTERFACE,
-for example::
-
-    PUBLIC_INTERFACE=eth1
-
-If the route to the external internet is not set up properly, the etcd
-discovery would fail (if using public discovery) and container images
-cannot be downloaded, among other failures.
-
-First, check for connectivity to the external internet by pinging
-an external IP (the IP shown here is an example; use an IP that
-works in your case)::
-
-    ping 8.8.8.8
-
-If the ping fails, there is no route to the external internet.
-Check the following:
-
-- Is PUBLIC_INTERFACE in devstack/local.conf the correct network
-  interface? Does this interface have a route to the external internet?
-- If "external-network" is specified in the ClusterTemplate, does this
-  network have a route to the external internet?
-- Is your devstack environment behind a firewall? This can be the case for some
-  enterprises or countries. In this case, consider using a `proxy server
-  `_.
-- Is the traffic blocked by the security group? Check the
-  `rules of security group
-  `_.
-- Is your host NAT'ing your internal network correctly? Check your host
-  `iptables `_.
-- Use *tcpdump* for `networking troubleshooting
-  `_.
-  You can run *tcpdump* on the interfaces *docker0*, *flannel0* and *eth0* on the
-  node and then run *ping* to see the path of the message from the container.
-
-If ping is successful, check that DNS is working::
-
-    wget google.com
-
-If DNS works, you should get back a few lines of HTML text.
-
-If the name lookup fails, check the following:
-
-- Is the DNS entry correct in the subnet? Try "neutron subnet-show
-  " for the private subnet and check dns_nameservers.
-  The IP should be either the default public DNS 8.8.8.8 or the value
-  specified by "dns-nameserver" in the ClusterTemplate.
-- If you are using your own DNS server by specifying "dns-nameserver"
-  in the ClusterTemplate, is it reachable and working?
-- More help on `DNS troubleshooting `_.
-
-
-Kubernetes networking
----------------------
-
-The networking between pods is different and separate from the neutron
-network set up for the cluster.
-Kubernetes presents a flat network space for the pods and services
-and uses different network drivers to provide this network model.
-
-It is possible for the pods to come up correctly and be able to connect
-to the external internet, but they cannot reach each other.
-In this case, the app in the pods may not be working as expected.
-For example, if you are trying the `redis example
-`_,
-the key:value may not be replicated correctly. In this case, use the
-following steps to verify the inter-pods networking and pinpoint problems.
-
-Since the steps are specific to the network drivers, refer to the
-particular driver being used for the cluster.
-
-Using Flannel as network driver
-...............................
-
-Flannel is the default network driver for Kubernetes clusters. Flannel is
-an overlay network that runs on top of the neutron network. It works by
-encapsulating the messages between pods and forwarding them to the
-correct node that hosts the target pod.
-
-First check the connectivity at the node level. Log into two
-different minion nodes, e.g. node A and node B, run a docker container
-on each node, attach to the container and find the IP.
-
-For example, on node A::
-
-    sudo docker run -it alpine
-    # ip -f inet -o a | grep eth0 | awk '{print $4}'
-    10.100.54.2/24
-
-Similarly, on node B::
-
-    sudo docker run -it alpine
-    # ip -f inet -o a | grep eth0 | awk '{print $4}'
-    10.100.49.3/24
-
-Check that the containers can see each other by pinging from one to another.
-
-On node A::
-
-    # ping 10.100.49.3
-    PING 10.100.49.3 (10.100.49.3): 56 data bytes
-    64 bytes from 10.100.49.3: seq=0 ttl=60 time=1.868 ms
-    64 bytes from 10.100.49.3: seq=1 ttl=60 time=1.108 ms
-
-Similarly, on node B::
-
-    # ping 10.100.54.2
-    PING 10.100.54.2 (10.100.54.2): 56 data bytes
-    64 bytes from 10.100.54.2: seq=0 ttl=60 time=2.678 ms
-    64 bytes from 10.100.54.2: seq=1 ttl=60 time=1.240 ms
-
-If the ping is not successful, check the following:
-
-- Is neutron working properly? Try pinging between the VMs.
-
-- Are the docker0 and flannel0 interfaces configured correctly on the
-  nodes? Log into each node and find the Flannel CIDR by::
-
-      cat /run/flannel/subnet.env | grep FLANNEL_SUBNET
-      FLANNEL_SUBNET=10.100.54.1/24
-
-  Then check the interfaces by::
-
-      ifconfig flannel0
-      ifconfig docker0
-
-  The correct configuration should assign flannel0 with the "0" address
-  in the subnet, like *10.100.54.0*, and docker0 with the "1" address, like
-  *10.100.54.1*.
-
-- Verify the IPs assigned to the nodes as found above are in the correct
-  Flannel subnet. If this is not correct, the docker daemon is not configured
-  correctly with the parameter *--bip*. Check the systemd service for docker.
-
-- Is Flannel running properly? Check `Running Flannel`_.
-
-- Ping and try `tcpdump
-  `_
-  on each network interface along the path between two nodes
-  to see how far the message is able to travel.
-  The message path should be as follows:
-
-  1. Source node: docker0
-  2. Source node: flannel0
-  3. Source node: eth0
-  4. Target node: eth0
-  5. Target node: flannel0
-  6. Target node: docker0
-
-If ping works, this means the flannel overlay network is functioning
-correctly.
-
-The containers created by Kubernetes for pods will be on the same IP
-subnet as the containers created directly in Docker as above, so they
-will have the same connectivity. However, the pods still may not be
-able to reach each other because normally they connect through some
-Kubernetes services rather than directly. The services are supported
-by the kube-proxy and rules inserted into the iptables, therefore
-their networking paths have some extra hops and there may be problems
-here.
-
-To check the connectivity at the Kubernetes pod level, log into the
-master node and create two pods and a service for one of the pods.
-You can use the examples provided in the directory
-*/etc/kubernetes/examples/* for the first pod and service. This will
-start up an nginx container and a Kubernetes service to expose the
-endpoint. Create another manifest for a second pod to test the
-endpoint::
-
-    cat > alpine.yaml << END
-    apiVersion: v1
-    kind: Pod
-    metadata:
-      name: alpine
-    spec:
-      containers:
-      - name: alpine
-        image: alpine
-        args:
-        - sleep
-        - "1000000"
-    END
-
-    kubectl create -f /etc/kubernetes/examples/pod-nginx-with-label.yaml
-    kubectl create -f /etc/kubernetes/examples/service.yaml
-    kubectl create -f alpine.yaml
-
-Get the endpoint for the nginx-service, which should route messages to the pod
-nginx::
-
-    kubectl describe service nginx-service | grep -e IP: -e Port:
-    IP:             10.254.21.158
-    Port:           8000/TCP
-
-Note the IP and port to use for checking below. Log into the node
-where the *alpine* pod is running. You can find the hosting node by
-running this command on the master node::
-
-    kubectl get pods -o wide | grep alpine | awk '{print $6}'
-    k8-gzvjwcooto-0-gsrxhmyjupbi-kube-minion-br73i6ans2b4
-
-To get the IP of the node, query Nova on devstack::
-
-    nova list
-
-On this hosting node, attach to the *alpine* container::
-
-    export DOCKER_ID=`sudo docker ps | grep k8s_alpine | awk '{print $1}'`
-    sudo docker exec -it $DOCKER_ID sh
-
-From the *alpine* pod, you can try to reach the nginx pod through the nginx
-service using the IP and Port found above::
-
-    wget 10.254.21.158:8000
-
-If the connection is successful, you should receive the file *index.html* from
-nginx.
-
-If the connection is not successful, you will get an error message like::
-
-    wget: can't connect to remote host (10.100.54.9): No route to host
-
-In this case, check the following:
-
-- Is kube-proxy running on the nodes? It runs as a container on each node.
-  Check by logging into the minion nodes and running::
-
-      sudo docker ps | grep k8s_kube-proxy
-
-- Check the log from kube-proxy by running on the minion nodes::
-
-      export PROXY=`sudo docker ps | grep "hyperkube proxy" | awk '{print $1}'`
-      sudo docker logs $PROXY
-
-- Try additional `service debugging
-  `_.
-  To see what's going on during provisioning::
-
-      kubectl get events
-
-  To get information on a service in question::
-
-      kubectl describe services
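The ``wget`` probe above can also be scripted with a timeout, so a broken kube-proxy path fails fast; a sketch using only the standard library (the ClusterIP and port are the illustrative values from ``kubectl describe service``):

.. code-block:: python

    # Sketch: probe a Kubernetes service endpoint the way the wget test
    # above does, with a short timeout instead of hanging.
    import socket

    def probe(ip, port, timeout=3):
        try:
            with socket.create_connection((ip, port), timeout=timeout):
                return True
        except OSError as e:
            print("no route to {}:{} ({})".format(ip, port, e))
            return False

    # Illustrative ClusterIP and port from `kubectl describe service`.
    probe("10.254.21.158", 8000)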
-
-
-etcd service
-------------
-
-The etcd service is used by many other components for key/value pair
-management, therefore if it fails to start, these other components
-will not be running correctly either.
-Check that etcd is running on the master nodes by::
-
-    sudo service etcd status -l
-
-If it is running correctly, you should see that the service is
-successfully deployed::
-
-    Active: active (running) since ....
-
-The log message should show the service being published::
-
-    etcdserver: published {Name:10.0.0.5 ClientURLs:[http://10.0.0.5:2379]} to cluster 3451e4c04ec92893
-
-In some cases, the service may show as *active* but may still be stuck
-in discovery mode and not fully operational. The log message may show
-something like::
-
-    discovery: waiting for other nodes: error connecting to https://discovery.etcd.io, retrying in 8m32s
-
-If this condition persists, check for `Cluster internet access`_.
-
-If the daemon is not running, the status will show the service as failed,
-something like::
-
-    Active: failed (Result: timeout)
-
-In this case, try restarting etcd by::
-
-    sudo service etcd start
-
-If etcd continues to fail, check the following:
-
-- Check the log for etcd::
-
-    sudo journalctl -u etcd
-
-- etcd requires discovery, and the default discovery method is the
-  public discovery service provided by etcd.io; therefore, a common
-  cause of failure is that this public discovery service is not
-  reachable. Check by running on the master nodes::
-
-    source /etc/sysconfig/heat-params
-    curl $ETCD_DISCOVERY_URL
-
-  You should receive something like::
-
-    {"action":"get",
-     "node":{"key":"/_etcd/registry/00a6b00064174c92411b0f09ad5466c6",
-       "dir":true,
-       "nodes":[
-         {"key":"/_etcd/registry/00a6b00064174c92411b0f09ad5466c6/7d8a68781a20c0a5",
-          "value":"10.0.0.5=http://10.0.0.5:2380",
-          "modifiedIndex":978239406,
-          "createdIndex":978239406}],
-       "modifiedIndex":978237118,
-       "createdIndex":978237118}
-    }
-
-  The list of master IPs is provided by Magnum during cluster deployment,
-  therefore it should match the current IPs of the master nodes.
-  If the public discovery service is not reachable, check the
-  `Cluster internet access`_.
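-In addition to the service status, etcd's own health commands can confirm
-that the cluster has actually formed. A minimal sketch, assuming the etcd
-v2 *etcdctl* client is available on the master node (as used in the
-Flannel checks below)::
-
-    etcdctl cluster-health
-    etcdctl member list
-
-Any member reported as unhealthy or missing points to the node to
-investigate next.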
-
-Running Flannel
----------------
-
-When deploying a COE, Flannel is available as a network driver for
-certain COE types. Magnum currently supports Flannel for a Kubernetes
-or Swarm cluster.
-
-Flannel provides a flat network space for the containers in the cluster:
-they are allocated IPs in this network space and they will have connectivity
-to each other. Therefore, if Flannel fails, some containers will not
-be able to access services from other containers in the cluster. This can be
-confirmed by running *ping* or *curl* from one container to another.
-
-The Flannel daemon is run as a systemd service on each node of the cluster.
-To check Flannel, run on each node::
-
-    sudo service flanneld status
-
-If the daemon is running, you should see that the service is successfully
-deployed::
-
-    Active: active (running) since ....
-
-If the daemon is not running, the status will show the service as failed,
-something like::
-
-    Active: failed (Result: timeout) ....
-
-or::
-
-    Active: inactive (dead) ....
-
-The Flannel daemon may also be running but not functioning correctly.
-Check the following:
-
-- Check the log for Flannel::
-
-    sudo journalctl -u flanneld
-
-- Since Flannel relies on etcd, a common cause for failure is that the
-  etcd service is not running on the master nodes. Check the `etcd service`_.
-  If the etcd service failed, once it has been restored successfully, the
-  Flannel service can be restarted by::
-
-    sudo service flanneld restart
-
-- Magnum writes the configuration for Flannel in a local file on each master
-  node. Check for this file on the master nodes by::
-
-    cat /etc/sysconfig/flannel-network.json
-
-  The content should be something like::
-
-    {
-      "Network": "10.100.0.0/16",
-      "Subnetlen": 24,
-      "Backend": {
-        "Type": "udp"
-      }
-    }
-
-  where the values for the parameters must match the corresponding
-  parameters from the ClusterTemplate.
-
-  Magnum also loads this configuration into etcd, therefore, verify
-  the configuration in etcd by running *etcdctl* on the master nodes::
-
-    . /etc/sysconfig/flanneld
-    etcdctl get $FLANNEL_ETCD_KEY/config
-
-- Each node is allocated a segment of the network space. Check
-  for this segment on each node by::
-
-    grep FLANNEL_SUBNET /run/flannel/subnet.env
-
-  The containers on this node should be assigned an IP in this range.
-  The nodes negotiate for their segment through etcd, and you can use
-  *etcdctl* on the master node to query the network segment associated
-  with each node::
-
-    . /etc/sysconfig/flanneld
-    for s in `etcdctl ls $FLANNEL_ETCD_KEY/subnets`
-    do
-        echo $s
-        etcdctl get $s
-    done
-
-    /atomic.io/network/subnets/10.100.14.0-24
-    {"PublicIP":"10.0.0.5"}
-    /atomic.io/network/subnets/10.100.61.0-24
-    {"PublicIP":"10.0.0.6"}
-    /atomic.io/network/subnets/10.100.92.0-24
-    {"PublicIP":"10.0.0.7"}
-
-  Alternatively, you can read the full record in etcd by::
-
-    curl http://<master-ip>:2379/v2/keys/coreos.com/network/subnets
-
-  You should receive a JSON snippet that describes all the segments
-  allocated.
-
-- This network segment is passed to Docker via the parameter *--bip*.
-  If this is not configured correctly, Docker would not assign the correct
-  IP in the Flannel network segment to the container. Check by::
-
-    cat /run/flannel/docker
-    ps -aux | grep docker
-
-- Check the interface for Flannel::
-
-    ifconfig flannel0
-
-  The IP should be the first address in the Flannel subnet for this node.
-
-- Flannel has several different backend implementations and they have
-  specific requirements. The *udp* backend is the most general and has
-  no requirements on the network. The *vxlan* backend requires vxlan
-  support in the kernel, so ensure that the image used does provide
-  vxlan support. The *host-gw* backend requires that all the hosts are
-  on the same L2 network. This is currently met by the private Neutron
-  subnet created by Magnum; however, if another network topology is used
-  instead, ensure that this requirement is met if *host-gw* is used.
-
-Current known limitation: the image fedora-21-atomic-5.qcow2 has
-Flannel version 0.5.0. This version has known bugs that prevent the
-*vxlan* and *host-gw* backends from working correctly. Only the *udp*
-backend works for this image. Version 0.5.3 and later should work
-correctly. The image fedora-21-atomic-7.qcow2 has Flannel version 0.5.5.
-
-Kubernetes services
--------------------
-*To be filled in*
-
-(How to introspect k8s when heat works and k8s does not)
-
-Additional `Kubernetes troubleshooting guide `_ is available.
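-In the meantime, a few generic introspection commands provide a starting
-point when heat reports success but the Kubernetes services misbehave.
-A minimal sketch, run on the master node (the pod name is a placeholder)::
-
-    kubectl get nodes
-    kubectl get pods --all-namespaces -o wide
-    kubectl get events
-    kubectl logs <pod-name>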
-
-
-Swarm services
---------------
-*To be filled in*
-
-(How to check on a swarm cluster: see membership information, view master,
-agent containers)
-
-Mesos services
---------------
-*To be filled in*
-
-
-Barbican issues
----------------
-*To be filled in*
-
-
-Docker CLI
-----------
-*To be filled in*
-
-
-Request volume size
--------------------
-*To be filled in*
-
-
-Heat software resource scripts
-------------------------------
-*To be filled in*
-
-
-For Developers
-==============
-
-This section is intended to help with issues that developers may
-run into in the course of their development adventures in Magnum.
-
-Troubleshooting in Gate
------------------------
-
-Simulating gate tests
-  (*Note*: This is adapted from Devstack Gate's `README`_, which
-  is worth a quick read to better understand the following.)
-
-  #. Boot a VM as described in Devstack Gate's `README`_.
-  #. Provision this VM like so::
-
-      # Kernel upgrade as recommended by the README; select to keep the
-      # existing grub config when prompted
-      apt-get update \
-      && apt-get upgrade -y \
-      && apt-get install -y git tmux vim \
-      && git clone https://git.openstack.org/openstack-infra/system-config \
-      && system-config/install_puppet.sh && system-config/install_modules.sh \
-      && puppet apply \
-      --modulepath=/root/system-config/modules:/etc/puppet/modules \
-      -e "class { openstack_project::single_use_slave: install_users => false,
-      ssh_key => \"$( cat .ssh/authorized_keys | awk '{print $2}' )\" }" \
-      && echo "jenkins ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers \
-      && cat ~/.ssh/authorized_keys >> /home/jenkins/.ssh/authorized_keys
-
-  #. Compare ``~/.ssh/authorized_keys`` and
-     ``/home/jenkins/.ssh/authorized_keys``. Your original public SSH key
-     should now be in ``/home/jenkins/.ssh/authorized_keys``. If it's not,
-     explicitly copy it (this can happen if you spin up a VM using
-     ``--key-name <name>``, for example).
-  #. Assuming all is well up to this point, now it's time to ``reboot``
-     into the latest kernel.
-  #. Once you're done booting into the new kernel, log back in as the
-     ``jenkins`` user to continue with setting up the simulation.
-  #. Now it's time to set up the workspace::
-
-      export REPO_URL=https://git.openstack.org
-      export WORKSPACE=/home/jenkins/workspace/testing
-      export ZUUL_URL=/home/jenkins/workspace-cache2
-      export ZUUL_REF=HEAD
-      export ZUUL_BRANCH=master
-      export ZUUL_PROJECT=openstack/magnum
-      mkdir -p $WORKSPACE
-      git clone $REPO_URL/$ZUUL_PROJECT $ZUUL_URL/$ZUUL_PROJECT \
-      && cd $ZUUL_URL/$ZUUL_PROJECT \
-      && git checkout remotes/origin/$ZUUL_BRANCH
-
-  #. At this point, you may want to test a specific change. If so, you can
-     pull down the changes in the ``$ZUUL_URL/$ZUUL_PROJECT`` directory::
-
-      cd $ZUUL_URL/$ZUUL_PROJECT \
-      && git fetch https://review.openstack.org/openstack/magnum refs/changes/83/247083/12 && git checkout FETCH_HEAD
-
-  #. Now you're ready to pull down the ``devstack-gate`` scripts that will
-     let you run the gate job on your own VM::
-
-      cd $WORKSPACE \
-      && git clone --depth 1 $REPO_URL/openstack-infra/devstack-gate
-  #. And now you can kick off the job using the following script (the
-     ``devstack-gate`` documentation suggests just copying from the job,
-     which can be found in the `project-config `_
-     repository); naturally it should be executable
-     (``chmod u+x <filename>``)::
-
-      #!/bin/bash -xe
-      cat > clonemap.yaml << EOF
-      clonemap:
-        - name: openstack-infra/devstack-gate
-          dest: devstack-gate
-      EOF
-      /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
-          git://git.openstack.org \
-          openstack-infra/devstack-gate
-      export PYTHONUNBUFFERED=true
-      export DEVSTACK_GATE_TIMEOUT=240 # bump this if you see timeout issues. Default is 120
-      export DEVSTACK_GATE_TEMPEST=0
-      export DEVSTACK_GATE_NEUTRON=1
-      # Enable tempest for tempest plugin
-      export ENABLED_SERVICES=tempest
-      export BRANCH_OVERRIDE="default"
-      if [ "$BRANCH_OVERRIDE" != "default" ] ; then
-          export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
-      fi
-      export PROJECTS="openstack/magnum $PROJECTS"
-      export PROJECTS="openstack/python-magnumclient $PROJECTS"
-      export PROJECTS="openstack/barbican $PROJECTS"
-      export DEVSTACK_LOCAL_CONFIG="enable_plugin magnum git://git.openstack.org/openstack/magnum"
-      export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer"
-      # Keep localrc to be able to set some vars in post_test_hook
-      export KEEP_LOCALRC=1
-      function gate_hook {
-          cd /opt/stack/new/magnum/
-          ./magnum/tests/contrib/gate_hook.sh api # change this to swarm to run swarm functional tests or k8s to run kubernetes functional tests
-      }
-      export -f gate_hook
-      function post_test_hook {
-          source $BASE/new/devstack/accrc/admin/admin
-          cd /opt/stack/new/magnum/
-          ./magnum/tests/contrib/post_test_hook.sh api # change this to swarm to run swarm functional tests or k8s to run kubernetes functional tests
-      }
-      export -f post_test_hook
-      cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
-      ./safe-devstack-vm-gate-wrap.sh
-
-Helpful nuances about the Devstack Gate
-  * Main job is in ``project-config``'s `magnum.yaml `_.
-
-    * Must modify parameters passed in since those are escaped:
-
-      * Anything with ``{}`` should be set as an environment variable
-
-      * Anything with ``{{ }}`` should have those brackets changed to
-        single brackets - ``{}``.
-
-      * As with the documentation for Devstack Gate, you can just create
-        a new file for the job you want, paste in what you want, then
-        ``chmod u+x <filename>`` and run it.
-
-    * Parameters can be found in `projects.yaml `_.
-      This file changes a lot, so it's more reliable to say that you can
-      search for the magnum jobs where you'll see examples of what
-      gets passed in.
-
-  * Three jobs are usually run as a part of Magnum gate, all of which are
-    found in ``project-config``'s `macros.yml `_:
-
-    * link-logs
-
-    * net-info
-
-    * devstack-checkout
-
-  * After you run a job, it's ideal to clean up and start over with a
-    fresh VM to best simulate the Devstack Gate environment.
-
-.. _README: https://github.com/openstack-infra/devstack-gate/blob/master/README.rst#simulating-devstack-gate-tests
diff --git a/doc/source/conf.py b/doc/source/conf.py
deleted file mode 100644
index e3912450..00000000
--- a/doc/source/conf.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -sys.path.insert(0, os.path.abspath('../..')) -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.graphviz', - 'stevedore.sphinxext', - 'openstackdocstheme', - 'oslo_config.sphinxconfiggen', -] - -# openstackdocstheme options -repository_name = 'openstack/magnum' -bug_project = 'magnum' -bug_tag = '' - -config_generator_config_file = '../../etc/magnum/magnum-config-generator.conf' -sample_config_basename = '_static/magnum' - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'magnum' -copyright = u'2013, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# The short X.Y version. -from magnum.version import version_info as magnum_version -version = magnum_version.canonical_version_string() -# The full version, including alpha/beta/rc tags. -release = magnum_version.version_string_with_vcs() - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -html_theme = 'openstackdocs' -# html_static_path = ['static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - u'%s Documentation' % project, - u'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. -# intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst deleted file mode 100644 index c3eddc1c..00000000 --- a/doc/source/configuration/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Sample Configuration File -------------------------- - -.. 
toctree:: - :maxdepth: 1 - - sample-config diff --git a/doc/source/configuration/sample-config.rst b/doc/source/configuration/sample-config.rst deleted file mode 100644 index f38c7e7e..00000000 --- a/doc/source/configuration/sample-config.rst +++ /dev/null @@ -1,13 +0,0 @@ -============================ -Magnum Configuration Options -============================ - -The following is a sample Magnum configuration for adaptation and use. It is -auto-generated from Magnum when this documentation is built, so -if you are having issues with an option, please compare your version of -Magnum with the version of this documentation. - -The sample configuration can also be viewed in :download:`file form -`. - -.. literalinclude:: /_static/magnum.conf.sample diff --git a/doc/source/contributor/api-microversion-history.rst b/doc/source/contributor/api-microversion-history.rst deleted file mode 100644 index 7b64ce41..00000000 --- a/doc/source/contributor/api-microversion-history.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../../magnum/api/rest_api_version_history.rst diff --git a/doc/source/contributor/api-microversion.rst b/doc/source/contributor/api-microversion.rst deleted file mode 100644 index c8146ba2..00000000 --- a/doc/source/contributor/api-microversion.rst +++ /dev/null @@ -1,320 +0,0 @@ -API Microversions -================= - -Background ----------- - -Magnum uses a framework we call 'API Microversions' for allowing changes -to the API while preserving backward compatibility. The basic idea is -that a user has to explicitly ask for their request to be treated with -a particular version of the API. So breaking changes can be added to -the API without breaking users who don't specifically ask for it. This -is done with an HTTP header ``OpenStack-API-Version`` which has as its -value a string containing the name of the service, ``container-infra``, -and a monotonically increasing semantic version number starting -from ``1.1``. -The full form of the header takes the form:: - - OpenStack-API-Version: container-infra 1.1 - -If a user makes a request without specifying a version, they will get -the ``BASE_VER`` as defined in -``magnum/api/controllers/versions.py``. This value is currently ``1.1`` and -is expected to remain so for quite a long time. - - -When do I need a new Microversion? ----------------------------------- - -A microversion is needed when the contract to the user is -changed. The user contract covers many kinds of information such as: - -- the Request - - - the list of resource urls which exist on the server - - Example: adding a new clusters/{ID}/foo which didn't exist in a - previous version of the code - - - the list of query parameters that are valid on urls - - Example: adding a new parameter ``is_yellow`` clusters/{ID}?is_yellow=True - - - the list of query parameter values for non free form fields - - Example: parameter filter_by takes a small set of constants/enums "A", - "B", "C". Adding support for new enum "D". - - - new headers accepted on a request - - - the list of attributes and data structures accepted. 
-
-    Example: adding a new attribute 'locked': True/False to the request body
-
-
-- the Response
-
-  - the list of attributes and data structures returned
-
-    Example: adding a new attribute 'locked': True/False to the output
-    of clusters/{ID}
-
-  - the allowed values of non free form fields
-
-    Example: adding a new allowed ``status`` to clusters/{ID}
-
-  - the list of status codes allowed for a particular request
-
-    Example: an API previously could return 200, 400, 403, 404 and the
-    change would make the API now also be allowed to return 409.
-
-    See [#f2]_ for the 400, 403, 404 and 415 cases.
-
-  - changing a status code on a particular response
-
-    Example: changing the return code of an API from 501 to 400.
-
-    .. note:: Fixing a bug so that a 400+ code is returned rather than a 500 or
-      503 does not require a microversion change. It's assumed that clients are
-      not expected to handle a 500 or 503 response and therefore should not
-      need to opt in to microversion changes that fix a 500 or 503 response.
-      According to the OpenStack API Working Group, a
-      **500 Internal Server Error** should **not** be returned to the user for
-      failures due to user error that can be fixed by changing the request on
-      the client side. See [#f1]_.
-
-  - new headers returned on a response
-
-The following flow chart attempts to walk through the process of "do
-we need a microversion".
-
-
-.. graphviz::
-
-   digraph states {
-
-    label="Do I need a microversion?"
-
-    silent_fail[shape="diamond", style="", group=g1, label="Did we silently
-    fail to do what is asked?"];
-    ret_500[shape="diamond", style="", group=g1, label="Did we return a 500
-    before?"];
-    new_error[shape="diamond", style="", group=g1, label="Are we changing what
-    status code is returned?"];
-    new_attr[shape="diamond", style="", group=g1, label="Did we add or remove an
-    attribute to a payload?"];
-    new_param[shape="diamond", style="", group=g1, label="Did we add or remove
-    an accepted query string parameter or value?"];
-    new_resource[shape="diamond", style="", group=g1, label="Did we add or remove a
-    resource url?"];
-
-
-    no[shape="box", style=rounded, label="No microversion needed"];
-    yes[shape="box", style=rounded, label="Yes, you need a microversion"];
-    no2[shape="box", style=rounded, label="No microversion needed, it's
-    a bug"];
-
-    silent_fail -> ret_500[label=" no"];
-    silent_fail -> no2[label="yes"];
-
-    ret_500 -> no2[label="yes [1]"];
-    ret_500 -> new_error[label=" no"];
-
-    new_error -> new_attr[label=" no"];
-    new_error -> yes[label="yes"];
-
-    new_attr -> new_param[label=" no"];
-    new_attr -> yes[label="yes"];
-
-    new_param -> new_resource[label=" no"];
-    new_param -> yes[label="yes"];
-
-    new_resource -> no[label=" no"];
-    new_resource -> yes[label="yes"];
-
-    {rank=same; yes new_attr}
-    {rank=same; no2 ret_500}
-    {rank=min; silent_fail}
-   }
-
-
-**Footnotes**
-
-.. [#f1] When fixing 500 errors that previously caused stack traces, try
-   to map the new error into the existing set of errors that API call
-   could previously return (400 if nothing else is appropriate). Changing
-   the set of allowed status codes from a request is changing the
-   contract, and should be part of a microversion (except in [#f2]_).
-
-   The reason why we are so strict on contract is that we'd like
-   application writers to be able to know, for sure, what the contract is
-   at every microversion in Magnum. If they do not, they will need to write
-   conditional code in their application to handle ambiguities.
-
-   When in doubt, consider application authors. If it would work with no
-   client side changes on both Magnum versions, you probably don't need a
-   microversion. If, on the other hand, there is any ambiguity, a
-   microversion is probably needed.
-
-.. [#f2] The exception to not needing a microversion when returning a
-   previously unspecified error code is the 400, 403, 404 and 415 cases. This
-   is considered OK to return even if previously unspecified in the code since
-   it's implied given keystone authentication can fail with a 403 and API
-   validation can fail with a 400 for an invalid JSON request body. A request
-   to a url/resource that does not exist always fails with 404. Invalid
-   content types are handled before API methods are called, which results
-   in a 415.
-
-   .. note:: When in doubt about whether or not a microversion is required
-      for changing an error response code, consult the `Containers Team`_.
-
-.. _Containers Team: https://wiki.openstack.org/wiki/Meetings/Containers
-
-
-When a microversion is not needed
----------------------------------
-
-A microversion is not needed in the following situation:
-
-- the response
-
-  - Changing the error message without changing the response code
-    does not require a new microversion.
-
-  - Removing an inapplicable HTTP header, for example, suppose the Retry-After
-    HTTP header is being returned with a 4xx code. This header should only be
-    returned with a 503 or 3xx response, so it may be removed without bumping
-    the microversion.
-
-In Code
--------
-
-In ``magnum/api/controllers/base.py`` we define an ``@api_version`` decorator
-which is intended to be used on top-level Controller methods. It is
-not appropriate for lower-level methods. Some examples:
-
-Adding a new API method
-~~~~~~~~~~~~~~~~~~~~~~~
-
-In the controller class::
-
-    @base.Controller.api_version("1.2")
-    def my_api_method(self, req, id):
-        ....
-
-This method would only be available if the caller had specified an
-``OpenStack-API-Version`` of >= ``1.2``. If they had specified a
-lower version (or not specified it and received the default of ``1.1``)
-the server would respond with ``HTTP/406``.
-
-Removing an API method
-~~~~~~~~~~~~~~~~~~~~~~
-
-In the controller class::
-
-    @base.Controller.api_version("1.2", "1.3")
-    def my_api_method(self, req, id):
-        ....
-
-This method would only be available if the caller had specified an
-``OpenStack-API-Version`` of >= ``1.2`` and
-``OpenStack-API-Version`` of <= ``1.3``. If ``1.4`` or later
-is specified the server will respond with ``HTTP/406``.
-
-Changing a method's behavior
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In the controller class::
-
-    @base.Controller.api_version("1.2", "1.3")
-    def my_api_method(self, req, id):
-        .... method_1 ...
-
-    @base.Controller.api_version("1.4") #noqa
-    def my_api_method(self, req, id):
-        .... method_2 ...
-
-If a caller specified ``1.2``, ``1.3`` (or received the default
-of ``1.1``) they would see the result from ``method_1``,
-and for ``1.4`` or later they would see the result from ``method_2``.
-
-It is vital that the two methods have the same name, so the second of
-them will need ``# noqa`` to avoid failing flake8's ``F811`` rule. The
-two methods may be different in any kind of semantics (schema
-validation, return values, response codes, etc.).
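-For illustration, a caller selects between the two implementations purely
-through the request header described above; a hypothetical request against
-a Magnum endpoint (the URL is a placeholder) might be::
-
-    # served by method_1
-    curl -H "OpenStack-API-Version: container-infra 1.3" \
-        http://<magnum-endpoint>/v1/clusters
-
-    # served by method_2
-    curl -H "OpenStack-API-Version: container-infra 1.4" \
-        http://<magnum-endpoint>/v1/clusters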
-
-When not using decorators
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When you don't want to use the ``@api_version`` decorator on a method
-or you want to change behavior within a method (say it leads to
-simpler or simply a lot less code) you can directly test for the
-requested version with a method as long as you have access to the api
-request object (commonly accessed with ``pecan.request``). Every API
-method has a versions object attached to the request object that can
-be used to modify behavior based on its value::
-
-    def index(self):
-
-        req_version = pecan.request.headers.get(Version.string)
-        req1_min = versions.Version("1.1")
-        req1_max = versions.Version("1.5")
-        req2_min = versions.Version("1.6")
-        req2_max = versions.Version("1.10")
-
-        if req_version.matches(req1_min, req1_max):
-            ....stuff....
-        elif req_version.matches(req2_min, req2_max):
-            ....other stuff....
-        elif req_version > versions.Version("1.10"):
-            ....more stuff.....
-
-The first argument to the matches method is the minimum acceptable version
-and the second is the maximum acceptable version. If the specified minimum
-version and maximum version are null then a ``ValueError`` is raised.
-
-Other necessary changes
------------------------
-
-If you are adding a patch which adds a new microversion, it is
-necessary to add changes to other places which describe your change:
-
-* Update ``REST_API_VERSION_HISTORY`` in
-  ``magnum/api/controllers/versions.py``
-
-* Update ``CURRENT_MAX_VER`` in
-  ``magnum/api/controllers/versions.py``
-
-* Add a verbose description to
-  ``magnum/api/rest_api_version_history.rst``. There should
-  be enough information that it could be used by the docs team for
-  release notes.
-
-* Update the expected versions in affected tests, for example in
-  ``magnum/tests/unit/api/controllers/test_base.py``.
-
-* Make a new commit to python-magnumclient and update corresponding
-  files to enable the newly added microversion API.
-
-* If the microversion changes the response schema, a new schema and test for
-  the microversion must be added to Tempest.
-
-Allocating a microversion
--------------------------
-
-If you are adding a patch which adds a new microversion, it is
-necessary to allocate the next microversion number. Except under
-extremely unusual circumstances (which would have been mentioned in
-the magnum spec for the change), the minor number of ``CURRENT_MAX_VER``
-will be incremented. This will also be the new microversion number for
-the API change.
-
-It is possible that multiple microversion patches would be proposed in
-parallel and the microversions would conflict between patches. This
-will cause a merge conflict. We don't reserve a microversion for each
-patch in advance as we don't know the final merge order. Developers
-may need to rebase their patch over time, recalculating the new version
-number as above based on the updated value of ``CURRENT_MAX_VER``.
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
deleted file mode 100644
index 2aa07077..00000000
--- a/doc/source/contributor/contributing.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-============
-Contributing
-============
-.. include:: ../../../CONTRIBUTING.rst
diff --git a/doc/source/contributor/functional-test.rst b/doc/source/contributor/functional-test.rst
deleted file mode 100644
index a4abc5e7..00000000
--- a/doc/source/contributor/functional-test.rst
+++ /dev/null
@@ -1,133 +0,0 @@
-========================
-Running functional tests
-========================
-
-This is a guide for developers who want to run functional tests on their
-local machine.
-
-Prerequisite
-============
-
-You need to have a Magnum instance running somewhere. If you are using
-devstack, follow the developer quickstart guide to deploy Magnum in a
-devstack environment
-
-``_
-
-Configuration
-=============
-The functional tests require a couple of configuration files, so you'll
-need to generate them yourself.
-
-For devstack
-------------
-If you're using devstack, you can copy and modify the devstack
-configuration::
-
-    cd /opt/stack/magnum
-    cp /opt/stack/tempest/etc/tempest.conf /opt/stack/magnum/etc/tempest.conf
-    cp functional_creds.conf.sample functional_creds.conf
-
-    # update the IP address
-    HOST=$(iniget /etc/magnum/magnum.conf api host)
-    sed -i "s/127.0.0.1/$HOST/" functional_creds.conf
-
-    # update admin password
-    source /opt/stack/devstack/openrc admin admin
-    iniset functional_creds.conf admin pass $OS_PASSWORD
-
-    # update demo password
-    source /opt/stack/devstack/openrc demo demo
-    iniset functional_creds.conf auth password $OS_PASSWORD
-
-Set the DNS name server to be used by your cluster nodes (e.g. 8.8.8.8)::
-
-    # update DNS name server
-    source /opt/stack/devstack/openrc demo demo
-    iniset functional_creds.conf magnum dns_nameserver <dns-ip>
-
-Create the necessary keypair and flavor::
-
-    source /opt/stack/devstack/openrc admin admin
-    openstack keypair create --public-key ~/.ssh/id_rsa.pub default
-    openstack flavor create --id 100 --ram 1024 --disk 10 --vcpus 1 m1.magnum
-    openstack flavor create --id 200 --ram 512 --disk 10 --vcpus 1 s1.magnum
-
-    source /opt/stack/devstack/openrc demo demo
-    openstack keypair create --public-key ~/.ssh/id_rsa.pub default
-
-You may need to explicitly upgrade required packages if you've installed them
-before and their versions become too old::
-
-    UPPER_CONSTRAINTS=/opt/stack/requirements/upper-constraints.txt
-    sudo pip install -c $UPPER_CONSTRAINTS -U -r test-requirements.txt
-
-Outside of devstack
--------------------
-If you are not using devstack, you'll need to create the configuration
-files.
-The /etc/tempest.conf configuration file is documented here
-
-``_
-
-Here's a reasonable sample of tempest.conf settings you might need::
-
-    [auth]
-    use_dynamic_credentials=False
-    test_accounts_file=/tmp/etc/magnum/accounts.yaml
-    admin_username=admin
-    admin_password=password
-    admin_project_name=admin
-
-    [identity]
-    disable_ssl_certificate_validation=True
-    uri=https://identity.example.com/v2.0
-    auth_version=v2
-    region=EAST
-
-    [identity-feature-enabled]
-    api_v2 = true
-    api_v3 = false
-    trust = false
-
-    [oslo_concurrency]
-    lock_path = /tmp/
-
-    [magnum]
-    image_id=22222222-2222-2222-2222-222222222222
-    nic_id=11111111-1111-1111-1111-111111111111
-    keypair_id=default
-    flavor_id=small
-    magnum_url=https://magnum.example.com/v1
-
-    [debug]
-    trace_requests=true
-
-A sample functional_creds.conf can be found in the root of this project,
-named functional_creds.conf.sample.
-
-When you run tox, be sure to specify the location of your tempest.conf
-using TEMPEST_CONFIG_DIR::
-
-    export TEMPEST_CONFIG_DIR=/tmp/etc/magnum/
-    tox -e functional-api
-
-Execution
-=========
-
-Magnum has different functional tests for each COE and for the API.
-All the environments are detailed in Magnum's tox.ini::
-
-    cat tox.ini | grep functional- | awk -F: '{print $2}' | sed s/]//
-
-To run a particular subset of tests, specify that group as a tox environment.
-For example, here is how you would run all of the kubernetes tests::
-
-    tox -e functional-k8s
-
-To run a specific test or group of tests, specify the test path as a
-positional argument::
-
-    tox -e functional-k8s -- magnum.tests.functional.k8s.v1.test_k8s_python_client.TestBayModelResource
-
-To avoid creating multiple clusters simultaneously, you can execute the
-tests with concurrency 1::
-
-    tox -e functional-swarm -- --concurrency 1
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
deleted file mode 100644
index 2c577c1c..00000000
--- a/doc/source/contributor/index.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-Contributor's Guide
-===================
-
-Getting Started
----------------
-
-If you are new to Magnum, this section contains information that should help
-you get started as a developer working on the project or contributing to the
-project.
-
-.. toctree::
-   :maxdepth: 1
-
-   Developer Contribution Guide
-   Setting Up Your Development Environment
-   Running Tempest Tests
-
-There are also other important documents that help new contributors
-contribute effectively and follow the project's code standards.
-
-.. toctree::
-   :maxdepth: 1
-
-   Writing a Release Note
-   Adding a New API Method
-   Changing Magnum DB Objects
-   api-microversion-history
-   policies
diff --git a/doc/source/contributor/objects.rst b/doc/source/contributor/objects.rst
deleted file mode 100644
index a3726b60..00000000
--- a/doc/source/contributor/objects.rst
+++ /dev/null
@@ -1,111 +0,0 @@
-..
-  Copyright 2015 IBM Corp.
-  All Rights Reserved.
-
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-Versioned Objects
-=================
-
-Magnum uses the `oslo.versionedobjects library `_ to
-construct an object model that can be communicated via RPC. These objects
-have a version history and functionality to convert from one version to a
-previous version. This allows two different levels of the code to still
-pass objects to each other, as in the case of rolling upgrades.
-
-Object Version Testing
-----------------------
-
-In order to ensure object versioning consistency is maintained,
-oslo.versionedobjects has a fixture to aid in testing object versioning.
-`oslo.versionedobjects.fixture.ObjectVersionChecker `_
-generates a fingerprint of each object, which is a combination of the
-current version number of the object and a hash of the RPC-critical parts
-of the object (fields and remotable methods).
-
-The tests hold a static mapping of the fingerprints of all objects. When an
-object is changed, the hash generated in the test will differ from that held
-in the static mapping. This will signal to the developer that the version of
-the object needs to be increased. Following this version increase, the
-fingerprint that is then generated by the test can be copied to the static
-mapping in the tests. This signifies that if the code change is approved,
-this is the new state of the object to compare against.
-
-Object Change Example
-'''''''''''''''''''''
-
-The following example shows the unit test workflow when changing an object
-(Cluster was updated to hold a new 'foo' field)::
-
-    tox -e py27 magnum.tests.unit.objects.test_objects
-
-This results in a unit test failure with the following output:
-
-  .. code-block:: python
-
-    testtools.matchers._impl.MismatchError: !=:
-    reference = {'Cluster': '1.0-35edde13ad178e9419e7ea8b6d580bcd'}
-    actual = {'Cluster': '1.0-22b40e8eed0414561ca921906b189820'}
-
-  .. code-block:: console
-
-    : Fields or remotable methods in some objects have changed. Make sure the versions of the objects has been bumped, and update the hashes in the static fingerprints tree (object_data). For more information, read http://docs.openstack.org/developer/magnum/objects.html.
-
-This is an indication that adding the 'foo' field to Cluster requires the
-version of Cluster to be bumped, so I increase the version and add a comment
-saying what I changed in the new version:
-
-  .. code-block:: python
-
-    @base.MagnumObjectRegistry.register
-    class Cluster(base.MagnumPersistentObject, base.MagnumObject,
-                  base.MagnumObjectDictCompat):
-        # Version 1.0: Initial version
-        # Version 1.1: Added 'foo' field
-        VERSION = '1.1'
-
-Now that I have updated the version, I will run the tests again and let the
-test tell me the fingerprint that I now need to put in the static tree:
-
-  .. code-block:: python
-
-    testtools.matchers._impl.MismatchError: !=:
-    reference = {'Cluster': '1.0-35edde13ad178e9419e7ea8b6d580bcd'}
-    actual = {'Cluster': '1.1-22b40e8eed0414561ca921906b189820'}
-
-I can now copy the new fingerprint needed
-(1.1-22b40e8eed0414561ca921906b189820) to the object_data map within
-magnum/tests/unit/objects/test_objects.py:
-
-  .. code-block:: python
-
-    object_data = {
-        'Cluster': '1.1-22b40e8eed0414561ca921906b189820',
-        'ClusterTemplate': '1.0-06863f04ab4b98307e3d1b736d3137bf',
-        'Certificate': '1.0-69b579203c6d726be7878c606626e438',
-        'MyObj': '1.0-b43567e512438205e32f4e95ca616697',
-        'X509KeyPair': '1.0-fd008eba0fbc390e0e5da247bba4eedd',
-        'MagnumService': '1.0-d4b8c0f3a234aec35d273196e18f7ed1',
-    }
-
-Running the unit tests now shows no failure.
- -If I did not update the version, and rather just copied the new hash to the -object_data map, the review would show the hash (but not the version) was -updated in object_data. At that point, a reviewer should point this out, and -mention that the object version needs to be updated. - -If a remotable method were added/changed, the same process is followed, because -this will also cause a hash change. diff --git a/doc/source/contributor/policies.rst b/doc/source/contributor/policies.rst deleted file mode 100644 index ace07fca..00000000 --- a/doc/source/contributor/policies.rst +++ /dev/null @@ -1,525 +0,0 @@ -########################### -Magnum Development Policies -########################### -.. contents:: - -Magnum is made possible by a wide base of contributors from numerous -countries and time zones around the world. We work as a team in accordance -with the `Guiding Principles -`_ of the -OpenStack Community. We all want to be valued members of a successful team -on an inspiring mission. Code contributions are merged into our code base -through a democratic voting process. Anyone may vote on patches submitted -by our contributors, and everyone is encouraged to make actionable and -helpful suggestions for how patches can be improved prior to merging. We -strive to strike a sensible balance between the speed of our work, and -the quality of each contribution. This document describes the correct -balance in accordance with the prevailing wishes of our team. - -This document is an extension of the `OpenStack Governance -`_ that explicitly converts our tribal -knowledge into a codified record. If any conflict is discovered between -the OpenStack governance, and this document, the OpenStack documents shall -prevail. - -********************* -Team Responsibilities -********************* - -Responsibilities for Everyone -============================= -`Everyone` in our community is expected to know and comply with the -`OpenStack Community Code of Conduct -`_. -We all need to work together to maintain a thriving team that enjoys working -together to solve challenges. - -Responsibilities for Contributors -================================= -When making contributions to any Magnum code repository, contributors shall -expect their work to be peer reviewed. See `Merge Criteria`_ for details -about how reviewed code is approved for merge. - -Expect reviewers to vote against merging a patch, along with actionable -suggestions for improvement prior to merging the code. Understand that such -a vote is normal, and is essential to our quality process. - -If you receive votes against your review submission, please revise your -work in accordance with any requests, or leave comments indicating why you -believe the work should be further considered without revision. - -If you leave your review without further comments or revision for an extended -period, you should mark your patch as `Abandoned`, or it may be marked as -`Abandoned` by another team member as a courtesy to you. A patch with no -revisions for multiple weeks should be abandoned, or changed to work in -progress (WIP) with the `workflow-1` flag. We want all code in the review -queue to be actionable by reviewers. Note that an `Abandoned` status shall -be considered temporary, and that your patch may be restored and revised -if and when you are ready to continue working on it. Note that a core -reviewer may un-abandon a patch to allow subsequent revisions by you or -another contributor, as needed. 
-
-When making revisions to patches, please acknowledge and confirm each
-previous review comment as Done or with an explanation for why the
-comment was not addressed in your subsequent revision.
-
-Summary of Contributor Responsibilities
----------------------------------------
-* Includes the `Everyone` responsibilities, plus:
-* Recognize that revisions are a normal part of our review process.
-* Make revisions to your patches to address reviewer comments.
-* Mark each inline comment as `Done` once it has been addressed.
-* Indicate why any requests have not been acted upon.
-* Set `workflow-1` until a patch is ready for merge consideration.
-* Consider patches without requested revisions as abandoned after a few weeks.
-
-Responsibilities for Reviewers
-==============================
-Each reviewer is responsible for upholding the quality of our code.
-By making constructive and actionable requests for revisions to patches,
-together we make better software. When making requests for revisions,
-each reviewer shall carefully consider our aim to merge contributions in
-a timely manner, while improving them. **Contributions do not need to be
-perfect in order to be merged.** You may make comments with a "0" vote to
-call out stylistic preferences that will not result in a material change
-to the software if/when resolved.
-
-If a patch improves our code but has been through enough revisions that
-delaying it further is worse than including it now in imperfect form, you
-may file a tech-debt bug ticket against the code, and vote to merge the
-imperfect patch.
-
-When a reviewer requests a revision to a patch, he or she is expected to
-review the subsequent revision to verify the change addressed the concern.
-
-Summary of Reviewer Responsibilities
-------------------------------------
-* Includes the Everyone responsibilities, plus:
-* Uphold the quality of our code.
-* Provide helpful and constructive requests for patch revisions.
-* Carefully balance the need to keep moving while improving contributions.
-* Submit tech-debt bugs to merge imperfect code with known problems.
-* Review your requested revisions to verify them.
-
-Responsibilities for Core Reviewers
-===================================
-Core reviewers have all the responsibilities mentioned above, as well as
-a responsibility to judge the readiness of a patch for merge, and to set
-the `workflow+1` flag to order a patch to be merged once at least one
-other core reviewer has issued a +2 vote. See: `Merge Criteria`_.
-
-Reviewers who use the -2 vote shall:
-
-1. Explain what scenarios can/will lift the -2 or downgrade it to a -1
-   (non-sticky), or explain "this is unmergable for reason <X>".
-   Non-negotiable reasons, such as breaking the API contract or introducing
-   fundamental security issues, are acceptable.
-2. Recognize that a -2 needs more justification than a -1 does. Both
-   require actionable notes, but a -2 comment shall outline the reason
-   for the sticky vote rather than a -1.
-3. Closely monitor comments and revisions to that review so the vote is
-   promptly downgraded or removed once addressed by the contributor.
-
-All core reviewers shall be responsible for setting a positive and welcoming
-tone toward other reviewers and contributors.
-
-Summary of Core Reviewer Responsibilities
------------------------------------------
-* Includes the Reviewer responsibilities, plus:
-* Judge the readiness of patches for merge.
-* Approve patches for merge when requirements are met.
-* Set a positive and welcoming tone toward other reviewers and contributors. - -PTL Responsibilities -==================== -In accordance with our `Project Team Guide for PTLs -`_ -our PTL carries all the responsibilities referenced above plus: - -* Select and target blueprints for each release cycle. -* Determine Team Consensus. Resolve disagreements among our team. -* May delegate his/her responsibilities to others. -* Add and remove core reviewers in accordance with his/her judgement. - * Note that in accordance with the Project Team Guide, selection or - removal of core reviewers is not a democratic process. - * Our PTL shall maintain a core reviewer group that works well together - as a team. Our PTL will seek advice from our community when making - such changes, but ultimately decides. - * Clearly communicate additions to the developer mailing list. - -########################## -Our Development Philosophy -########################## -******** -Overview -******** -* Continuous iterative improvements. -* Small contributions preferred. -* Perfect is the enemy of good. -* We need a compass, not a master plan. - -********** -Discussion -********** -We believe in making continuous iterative improvements to our software. -Making several small improvements is preferred over making fewer large -changes. Contributions of about perhaps 400 lines of change or less are -considered ideal because they are easier to review. This makes them -more efficient from a review perspective than larger contributions are, -because they get reviewed more quickly, and are faster to revise than -larger works. We also encourage unrelated changes to be contributed in -separate patches to make reasoning about each one simpler. - -Although we should strive for perfection in our work, we must recognize that -what matters more than absolute perfection is that our software is -consistently improving over time. When contributions are slowed down by too -many revisions, we should decide to merge code even when it is imperfect, -as long as we have systematically tracked the weaknesses so we can revisit -them with subsequent revision efforts. - -Rule of Thumb -============= -Our rule of thumb shall be the answer to two simple questions: - -1. Is this patch making Magnum better? -2. Will this patch cause instability, or prevent others from using Magnum - effectively? - -If the answers respectively are *yes* and *no*, and our objections can be -effectively addressed in a follow-up patch, then we should decide to merge -code with tech-debt bug tickets to systematically track our desired -improvements. - -********************* -How We Make Decisions -********************* -Team Consensus -============== -On the Magnum team, we rely on Team Consensus to make key decisions. -Team Consensus is the harmonious and peaceful agreement of the majority -of our participating team. That means that we seek a clear indication of -agreement of those engaged in discussion of a topic. Consensus shall not -be confused with the concept of Unanimous Consent where all participants -are in full agreement. Our decisions do not require Unanimous Consent. We -may still have a team consensus even if we have a small number of team -members who disagree with the majority viewpoint. We must recognize that -we will not always agree on every key decision. What's more important than -our individual position on an argument is that the interests of our team -are met. 
- -We shall take reasonable efforts to address all opposition by fairly -considering it before making a decision. Although Unanimous Consent -is not required to make a key decision, we shall not overlook legitimate -questions or concerns. Once each such concern has been addressed, we may -advance to making a determination of Team Consensus. - -Some code level changes are controversial in nature. If this happens, and -a core reviewer judges the minority viewpoint to be reasonably considered, -he or she may conclude we have Team Consensus and approve the patch for -merge using the normal voting guidelines. We shall allow reasonable time -for discussion and socialization when controversial decisions are considered. - -If any contributor disagrees with a merged patch, and believes our decision -should be reconsidered, (s)he may consult our `Reverting Patches`_ -guidelines. - -No Deadlocks -============ -We shall not accept any philosophy of "agree to disagree". This form of -deadlock is not decision making, but the absence of it. Instead, we shall -proceed to decision making in a timely fashion once all input has been -fairly considered. We shall accept when a decision does not go our way. - -Handling Disagreement -===================== -When we disagree, we shall first consult the -`OpenStack Community Code of Conduct -`_ for guidance. -In accordance with our code of conduct, our disagreements shall be handled -with patience, respect, and fair consideration for those who don't share -the same point of view. When we do not agree, we take care to ask why. We -strive to understand the reasons we disagree, and seek opportunities to -reach a compromise. - -Our PTL is responsible for determining Team Consensus when it can not be -reached otherwise. In extreme cases, it may be possible to appeal a PTL -decision to the `OpenStack TC -`_. - -******************* -Open Design Process -******************* -One of the `four open -`_ -principles embraced by the OpenStack community is Open Design. We -collaborate openly to design new features and capabilities, as well as -planning major improvements to our software. We use multiple venues to -conduct our design, including: - -* Written specifications -* Blueprints -* Bug tickets -* PTG meetings -* Summit meetings -* IRC meetings -* Mailing list discussions -* Review comments -* IRC channel discussion - -The above list is ordered by formality level. Notes and/or minutes from -meetings shall be recorded in etherpad documents so they can be accessed -by participants not present in the meetings. Meetings shall be open, and -shall not intentionally exclude any stakeholders. - -Specifications -============== -The most formal venue for open design are written specifications. These -are RST format documents that are proposed in the magnum-specs code -repository by release cycle name. The repository holds a template for -the format of the document, as required by our PTL for each release cycle. - -Specifications are intended to be a high level description of a major -feature or capability, expressed in a way to demonstrate that the feature -has been well contemplated, and is acceptable by Team Consensus. Using -specifications allows us to change direction without requiring code rework -because input can be considered before code has been written. - -Specifications do not require specific implementation details. They shall -describe the implementation in enough detail to give reviewers a high level -sense of what to expect, with examples to make new concepts clear. 
We do
-not require specifications that detail every aspect of the implementation.
-We recognize that it is more effective to express implementations with
-patches than conveying them in the abstract. If a proposed patch set for
-an implementation is not acceptable, we can address such concerns using
-review comments on those patches. If a reviewer has an alternate idea for
-implementation, they are welcome to develop another patch in WIP or
-completed form to demonstrate an alternative approach for consideration.
-This option for submitting an alternative review is available for alternate
-specification ideas that reach beyond the scope of a simple review comment.
-Offering reviewers multiple choices for contributions is welcome, and is
-not considered wasteful.
-
-Implementations of features do not require merged specifications. However,
-major features or refactoring should be expressed in a specification so
-reviewers will know what to expect prior to considering code for review.
-Contributors are welcome to start implementation before the specifications
-are merged, but should be ready to revise the implementation as needed to
-conform with changes in the merged specification.
-
-Reviews
-=======
-A review is a patch set that includes a proposal for inclusion in our code
-base. We follow the process outlined in the `Code Review `_
-section of the `OpenStack Developer's Guide `_.
-The following workflow states may be applied to each review:
-
-========== ================== =============================================
-State      Meaning            Detail
-========== ================== =============================================
-workflow-1 Work in progress   This patch is submitted for team input,
-                              but should not yet be considered for merge.
-                              May be set by a core reviewer as a courtesy.
-                              It can be set after workflow+1 but prior to
-                              merge in order to prevent a gate breaking
-                              merge.
-workflow-0 Ready for reviews  This patch should be considered for merge.
-workflow+1 Approved           This patch has received at least two +2
-                              votes, and is approved for merge. Also
-                              known as a "+A" vote.
-========== ================== =============================================
-
-The following votes may be applied to a review:
-
-====== ====================================================================
- Vote   Meaning
-====== ====================================================================
- -2     Do Not Merge
-
-        * WARNING: Use extreme caution applying this vote, because
-          contributors perceive this action as hostile unless it is
-          accompanied with a genuine offer to help remedy a critical
-          concern collaboratively.
-        * This vote is a veto that indicates a critical problem with
-          the contribution. It is sticky, meaning it must be removed
-          by the individual who added it, even if further revisions
-          are made.
-        * All -2 votes shall be accompanied with a polite comment that
-          clearly states what can be changed by the contributor to result
-          in reversal or downgrade of the vote to a -1.
-        * Core reviewers may use this vote:
-
-          * To indicate a critical problem to address, such as a
-            security vulnerability that other core reviewers may be
-            unable to recognize.
-          * To indicate a decision that the patch is not consistent
-            with the direction of the project, subsequent to conference
-            with the PTL about the matter.
-
-        * The PTL may use this vote:
-
-          * To indicate a decision that the patch is not consistent
-            with the direction of the project.
-          * While coordinating a release to prevent incompatible changes
-            from merging before the release is tagged.
-          * To address a critical concern with the contribution.
-
-        * Example uses of this vote that are not considered appropriate:
-
-          * To ensure more reviews before merge.
-          * To block competing patches.
-          * In cases when you lack the time to follow up closely afterward.
-
-        * To avoid a -2 vote on your contribution, discuss your plans
-          with the development team prior to writing code, and post a
-          WIP (`workflow-1`) patch while you are working on it, and ask
-          for input before you submit it for merge review.
-
- -1     This patch needs further work before it can be merged
-
-        * This vote indicates an opportunity to make our code better
-          before it is merged.
-        * It asks the submitter to make a revision in accordance with
-          your feedback before core reviewers should consider this code
-          for merge.
-        * This vote shall be accompanied with constructive and actionable
-          feedback for how to improve the submission.
-        * If you use a -1 vote to ask a question, and the contributor
-          answers the question, please respond acknowledging the answer.
-          Either change your vote or follow up with additional rationale
-          for why this should remain a -1 comment.
-        * These votes will be cleared when you make a revision to a patch
-          set, and resubmit it for review.
-        * NOTE: Upon fair consideration of the viewpoint shared with this
-          vote, reviewers are encouraged to vote in accordance with their
-          own view of the contribution. This guidance applies when any
-          reviewer (PTL, core, etc.) has voted against it. Such opposing
-          views must be freely expressed to reach Team Consensus. When you
-          agree with a -1 vote, you may also vote -1 on the review to
-          echo the same concern.
-
-  0     No Score
-
-        * Used to make remarks or ask questions that may not require a
-          revision to answer.
-        * Used to confirm that your prior -1 vote concern was addressed.
-
- +1     Looks good to me, but someone else must approve
-
-        * Used to validate the quality of a contribution and express
-          agreement with the implementation.
-        * Resist the temptation to blindly +1 code without reviewing
-          it in sufficient detail to form an opinion.
-        * A core reviewer may use this if they:
-
-          * Provided a revision to the patch to fix something, but agree
-            with the rest of the patch.
-          * Agree with the patch but have outstanding questions that
-            do not warrant a -1 but would be nice to have answered.
-          * Agree with the patch with some uncertainty before using
-            a +2. It can indicate support while awaiting test results
-            or additional input from others.
-
- +2     Looks good to me (core reviewer)
-
-        * Used by core reviewers to indicate acceptance of the patch
-          in its current form.
-        * Two of these votes are required for +A.
-        * Apply our `Rule of Thumb`_.
-
- +A     Approval for merge
-
-        * This means setting the workflow+1 state, and is typically
-          added together with the final +2 vote upon `Merge Criteria`_
-          being met.
-====== ====================================================================
-
-Merge Criteria
---------------
-We want code to merge relatively quickly in order to keep a rapid pace of
-innovation. Rather than asking reviewers to wait a prescribed arbitrary
-time before merging patches, we instead use a simple `2 +2s` policy for
-approving new code for merge. The following criteria apply when judging
-readiness to merge a patch:
-
-1. All contributions shall be peer reviewed and approved with a +2 vote by
-   at least two core reviewers prior to being merged.
Exceptions known as
-   `Fast Merge`_ commits may bypass peer review as allowed by this policy.
-2. The approving reviewer shall verify that all open questions and concerns
-   have been adequately addressed prior to voting +A by adding the
-   workflow+1 to merge a patch. This judgement verifies that
-   `Team Consensus`_ has been reached.
-
-Note: We discourage any `workflow+1` vote on patches that only have two +2
-votes from cores from the same affiliation. This guideline applies when
-reviewer diversity allows for it.
-
-See `Reverting Patches`_ for details about how to remedy mistakes when code
-is merged too quickly.
-
-Reverting Patches
------------------
-Moving quickly with our `Merge Criteria`_ means that sometimes we might
-make mistakes. If we do, we may revert problematic patches. The following
-options may be applied:
-
-1. Any contributor may revert a change by submitting a patch that undoes the
-   objectionable change and includes a reference to the original patch in the
-   commit message. The commit message shall include clear rationale for
-   considering the revert. Normal voting rules apply.
-2. Any contributor may re-implement a feature using an alternate approach
-   at any time, even after a previous implementation has merged. Normal
-   voting rules apply.
-3. If a core reviewer wishes to revert a change, they may use the options
-   described above, or may apply the `Fast Revert`_ policy.
-
-Fast Merge
-----------
-Sometimes we need to merge code quickly by bypassing the peer review process
-when justified. Allowed exceptions include:
-
-* PTL (Project Team Lead) Intervention / Core intervention
-    * Emergency un-break gate.
-    * `VMT `_ embargoed
-      patch submitted to Gerrit.
-* Automatic proposals (e.g. requirements updates).
-* PTL / Core discretion (with comment) that a patch already received a
-  +2 but minor (typo/rebase) fixes were addressed by another core reviewer
-  and the `correcting` reviewer has opted to carry forward the other +2.
-  The `correcting` reviewer shall not be the original patch submitter.
-
-We recognize that mistakes may happen when changes are merged quickly. When
-concerns with any `Fast Merge` surface, our `Fast Revert`_ policy may be
-applied.
-
-Fast Revert
------------
-This policy was adapted from nova's `Reverts for Retrospective Vetos
-`_ policy in 2017.
-Sometimes our simple `2 +2s` approval policy will result in errors when we
-move quickly. These errors might be a bug that was missed, or equally
-importantly, it might be that other cores feel that there is a need for
-further discussion on the implementation of a given piece of code.
-
-Rather than an enforced time-based solution - for example, a patch could
-not be merged until it has been up for review for 3 days - we have chosen
-an honor-based system of `Team Consensus`_ where core reviewers do not
-approve controversial patches until proposals are sufficiently socialized
-and everyone has a chance to raise any concerns.
-
-Recognizing that mistakes can happen, we also have a policy where contentious
-patches which were quickly approved may be reverted so that the discussion
-around the proposal may continue as if the patch had never been merged in the
-first place. In such a situation, the procedure is:
-
-1. The commit to be reverted must not have been released.
-2. The core team member who has a -2-worthy objection may propose a
-   revert, stating the specific concerns that they feel need addressing.
-3. Any subsequent patches depending on the to-be-reverted patch shall be
-   reverted also, as needed.
-4. 
Other core team members shall quickly approve the revert. No detailed - debate is needed at this point. A -2 vote on a revert is strongly - discouraged, because it effectively blocks the right of cores approving - the revert from -2 voting on the original patch. -5. The original patch submitter may re-submit the change, with a reference - to the original patch and the revert. -6. The original reviewers of the patch shall restore their votes and attempt - to summarize their previous reasons for their votes. -7. The patch shall not be re-approved until the concerns of the opponents - are fairly considered. A mailing list discussion or design spec may be - the best way to achieve this. - -This policy shall not be used in situations where `Team Consensus`_ was -fairly reached over a reasonable period of time. A `Fast Revert` applies -only to new concerns that were not part of the `Team Consensus`_ -determination when the patch was merged. - -See also: `Team Consensus`_. - -Continuous Improvement -====================== -If any part of this document is not clear, or if you have suggestions for -how to improve it, please contact our PTL for help. diff --git a/doc/source/contributor/quickstart.rst b/doc/source/contributor/quickstart.rst deleted file mode 100644 index 17a26d8c..00000000 --- a/doc/source/contributor/quickstart.rst +++ /dev/null @@ -1,743 +0,0 @@ -.. _quickstart: - -===================== -Developer Quick-Start -===================== - -This is a quick walkthrough to get you started developing code for magnum. -This assumes you are already familiar with submitting code reviews to an -OpenStack project. - -.. seealso:: - - http://docs.openstack.org/infra/manual/developers.html - -Setup Dev Environment -===================== - -Install OS-specific prerequisites:: - - # Ubuntu Xenial: - sudo apt update - sudo apt install -y python-dev libssl-dev libxml2-dev curl \ - libmysqlclient-dev libxslt-dev libpq-dev git \ - libffi-dev gettext build-essential python3-dev - - # Fedora/RHEL: - sudo yum install -y python-devel openssl-devel mysql-devel curl \ - libxml2-devel libxslt-devel postgresql-devel git \ - libffi-devel gettext gcc - - # openSUSE/SLE 12: - sudo zypper --non-interactive install git libffi-devel curl \ - libmysqlclient-devel libopenssl-devel libxml2-devel \ - libxslt-devel postgresql-devel python-devel \ - gettext-runtime - -Install pip:: - - curl -s https://bootstrap.pypa.io/get-pip.py | sudo python - -Install common prerequisites:: - - sudo pip install virtualenv flake8 tox testrepository git-review - -You may need to explicitly upgrade virtualenv if you've installed the one -from your OS distribution and it is too old (tox will complain). You can -upgrade it individually, if you need to:: - - sudo pip install -U virtualenv - -Magnum source code should be pulled directly from git:: - - # from your home or source directory - cd ~ - git clone https://git.openstack.org/openstack/magnum - cd magnum - -All unit tests should be run using tox. To run magnum's entire test suite:: - - # run all tests (unit and pep8) - tox - -To run a specific test, use a positional argument for the unit tests:: - - # run a specific test for Python 2.7 - tox -epy27 -- test_conductor - -You may pass options to the test programs using positional arguments:: - - # run all the Python 2.7 unit tests (in parallel!) 
-    tox -epy27 -- --parallel
-
-To run only the pep8/flake8 syntax and style checks::
-
-    tox -epep8
-
-To run unit test coverage and check percentage of code covered::
-
-    tox -e cover
-
-To discover and interact with templates, please refer to
-``_
-
-Exercising the Services Using Devstack
-======================================
-
-Devstack can be configured to enable magnum support. It is easy to develop
-magnum with the devstack environment. Magnum depends on nova, glance, heat and
-neutron to create and schedule virtual machines to simulate bare-metal (full
-bare-metal support is under active development).
-
-**NOTE:** Running devstack within a virtual machine with magnum enabled is not
-recommended at this time.
-
-This walkthrough has only been tested on Ubuntu 16.04 (Xenial) and Fedora
-20/21. We recommend selecting one of these if possible.
-
-Clone devstack::
-
-    # Create a root directory for devstack if needed
-    sudo mkdir -p /opt/stack
-    sudo chown $USER /opt/stack
-
-    git clone https://git.openstack.org/openstack-dev/devstack /opt/stack/devstack
-
-We will run devstack with minimal local.conf settings required to enable
-magnum, heat, and neutron (neutron has been enabled by default in devstack
-since Kilo, while heat must be enabled explicitly)::
-
-    $ cat > /opt/stack/devstack/local.conf << END
-    [[local|localrc]]
-    DATABASE_PASSWORD=password
-    RABBIT_PASSWORD=password
-    SERVICE_TOKEN=password
-    SERVICE_PASSWORD=password
-    ADMIN_PASSWORD=password
-    # magnum requires the following to be set correctly
-    PUBLIC_INTERFACE=eth1
-
-    # Enable barbican service and use it to store TLS certificates
-    # For details https://docs.openstack.org/developer/magnum/userguide.html#transport-layer-security
-    enable_plugin barbican https://git.openstack.org/openstack/barbican
-
-    enable_plugin heat https://git.openstack.org/openstack/heat
-
-    # Enable magnum plugin after dependent plugins
-    enable_plugin magnum https://git.openstack.org/openstack/magnum
-
-    # Optional: uncomment to enable the Magnum UI plugin in Horizon
-    #enable_plugin magnum-ui https://github.com/openstack/magnum-ui
-
-    VOLUME_BACKING_FILE_SIZE=20G
-    END
-
-**NOTE:** Update PUBLIC_INTERFACE as appropriate for your system.
-
-**NOTE:** Enabling the heat plugin is necessary.
-
-Optionally, you can enable neutron/lbaas v2 with octavia to create load
-balancers for multi-master clusters::
-
-    $ cat >> /opt/stack/devstack/local.conf << END
-    enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas
-    enable_plugin octavia https://git.openstack.org/openstack/octavia
-
-    # Disable LBaaS(v1) service
-    disable_service q-lbaas
-    # Enable LBaaS(v2) services
-    enable_service q-lbaasv2
-    enable_service octavia
-    enable_service o-cw
-    enable_service o-hk
-    enable_service o-hm
-    enable_service o-api
-    END
-
-Optionally, you can enable ceilometer in devstack.
If ceilometer is enabled,
-magnum will periodically send metrics to ceilometer::
-
-    $ cat >> /opt/stack/devstack/local.conf << END
-    enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer
-    END
-
-If you want to deploy Docker Registry 2.0 in your cluster, you should enable
-swift in devstack::
-
-    $ cat >> /opt/stack/devstack/local.conf << END
-    enable_service s-proxy
-    enable_service s-object
-    enable_service s-container
-    enable_service s-account
-    END
-
-More devstack configuration information can be found at
-http://docs.openstack.org/developer/devstack/configuration.html
-
-More neutron configuration information can be found at
-http://docs.openstack.org/developer/devstack/guides/neutron.html
-
-Run devstack::
-
-    cd /opt/stack/devstack
-    ./stack.sh
-
-**NOTE:** This will take a little extra time when the Fedora Atomic micro-OS
-image is downloaded for the first time.
-
-At this point, two magnum processes (magnum-api and magnum-conductor) will be
-running on devstack screens. If you make some code changes and want to
-test their effects, just stop and restart magnum-api and/or magnum-conductor.
-
-Prepare your session to be able to use the various openstack clients including
-magnum, neutron, and glance. Create a new shell, and source the devstack openrc
-script::
-
-    source /opt/stack/devstack/openrc admin admin
-
-Magnum has been tested with the Fedora Atomic micro-OS and CoreOS. Magnum will
-likely work with other micro-OS platforms, but each requires individual
-support in the heat template.
-
-The Fedora Atomic micro-OS image will automatically be added to glance. You
-can add additional images manually through glance. To verify the image created
-when installing devstack use::
-
-    $ openstack image list
-
-    +--------------------------------------+----------------------+
-    | ID                                   | Name                 |
-    +--------------------------------------+----------------------+
-    | d6ab5b70-c866-4697-ad8c-f40dbd18eaec | cirros-0.3.4.img     |
-    | 71fe76a1-58fb-45e9-89a4-6d772fefea07 | fedora-atomic-latest |
-    | 7d5acefc-9766-47be-9bff-7af5d6fbef35 | cirros               |
-    +--------------------------------------+----------------------+
-
-To list the available commands and resources for magnum, use::
-
-    magnum help
-
-To check the health of magnum's internal services (currently just the
-conductor), use::
-
-    $ magnum service-list
-
-    +----+---------------------------------------+------------------+-------+----------+-----------------+---------------------------+---------------------------+
-    | id | host                                  | binary           | state | disabled | disabled_reason | created_at                | updated_at                |
-    +----+---------------------------------------+------------------+-------+----------+-----------------+---------------------------+---------------------------+
-    | 1  | oxy-dev.hq1-0a5a3c02.hq1.abcde.com    | magnum-conductor | up    |          | -               | 2016-08-31T10:03:36+00:00 | 2016-08-31T10:11:41+00:00 |
-    +----+---------------------------------------+------------------+-------+----------+-----------------+---------------------------+---------------------------+
-
-Create a keypair for use with the ClusterTemplate::
-
-    test -f ~/.ssh/id_rsa.pub || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
-    nova keypair-add --pub-key ~/.ssh/id_rsa.pub testkey
-
-Check that a DNS server can resolve a host name properly::
-
-    dig <host_name> @<dns_server_ip> +short
-
-For example::
-
-    $ dig www.openstack.org @8.8.8.8 +short
-
-    www.openstack.org.cdn.cloudflare.net.
-    104.20.64.68
-    104.20.65.68
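-
-Before creating a ClusterTemplate, it can also help to confirm that the image
-carries the 'os_distro' property, since magnum relies on it to pick the right
-driver (an optional check, using the image name from the listing above)::
-
-    openstack image show fedora-atomic-latest -f value -c properties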
-
-Building a Kubernetes Cluster - Based on Fedora Atomic
-======================================================
-
-Create a ClusterTemplate. This is similar in nature to a flavor and describes
-to magnum how to construct the cluster. The ClusterTemplate specifies a Fedora
-Atomic image so the clusters which use this ClusterTemplate will be based on
-Fedora Atomic. The COE (Container Orchestration Engine) and keypair need to
-be specified as well::
-
-    magnum cluster-template-create k8s-cluster-template \
-                           --image fedora-atomic-latest \
-                           --keypair testkey \
-                           --external-network public \
-                           --dns-nameserver 8.8.8.8 \
-                           --flavor m1.small \
-                           --docker-volume-size 5 \
-                           --network-driver flannel \
-                           --coe kubernetes
-
-Create a cluster. Use the ClusterTemplate name as a template for cluster
-creation. This cluster will result in one master kubernetes node and one minion
-node::
-
-    magnum cluster-create k8s-cluster \
-                          --cluster-template k8s-cluster-template \
-                          --node-count 1
-
-Clusters will have an initial status of CREATE_IN_PROGRESS. Magnum will update
-the status to CREATE_COMPLETE when it is done creating the cluster. Do not
-create containers, pods, services, or replication controllers before magnum
-finishes creating the cluster. They will likely not be created, and may cause
-magnum to become confused.
-
-The existing clusters can be listed as follows::
-
-    $ magnum cluster-list
-
-    +--------------------------------------+-------------+------------+--------------+-----------------+
-    | uuid                                 | name        | node_count | master_count | status          |
-    +--------------------------------------+-------------+------------+--------------+-----------------+
-    | 9dccb1e6-02dc-4e2b-b897-10656c5339ce | k8s-cluster | 1          | 1            | CREATE_COMPLETE |
-    +--------------------------------------+-------------+------------+--------------+-----------------+
-
-More detailed information for a given cluster is obtained via::
-
-    magnum cluster-show k8s-cluster
-
-After a cluster is created, you can dynamically add/remove node(s) to/from the
-cluster by updating the node_count attribute. For example, to add one more
-node::
-
-    magnum cluster-update k8s-cluster replace node_count=2
-
-Clusters in the process of updating will have a status of UPDATE_IN_PROGRESS.
-Magnum will update the status to UPDATE_COMPLETE when it is done updating
-the cluster.
-
-**NOTE:** Reducing node_count will remove all the existing pods on the nodes
-that are deleted. If you choose to reduce the node_count, magnum will first
-try to remove empty nodes with no pods running on them. If you reduce
-node_count by more than the number of empty nodes, magnum must remove nodes
-that have running pods on them. This action will delete those pods. We
-strongly recommend using a replication controller before reducing the
-node_count so any removed pods can be automatically recovered on your
-remaining nodes.
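-
-If you script cluster creation, one simple way to wait for the cluster to
-become ready is to poll the status field from the client output. This is only
-a sketch: it assumes the table layout shown above and a Bash shell::
-
-    # Poll until the cluster reaches CREATE_COMPLETE
-    until [ "$(magnum cluster-show k8s-cluster | awk '/ status /{print $4}')" = "CREATE_COMPLETE" ]; do
-        sleep 30
-    done
-
-A failed creation leaves the cluster in a CREATE_FAILED status, so a real
-script should also check for that case to avoid looping forever.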
-
-Heat can be used to see detailed information on the status of a stack or
-specific cluster:
-
-To check the list of all cluster stacks::
-
-    openstack stack list
-
-To check an individual cluster's stack::
-
-    openstack stack show <stack_name_or_id>
-
-Monitoring cluster status in detail (e.g., creating, updating)::
-
-    CLUSTER_HEAT_NAME=$(openstack stack list | \
-                        awk "/\sk8s-cluster-/{print \$4}")
-    echo ${CLUSTER_HEAT_NAME}
-    openstack stack resource list ${CLUSTER_HEAT_NAME}
-
-Building a Kubernetes Cluster - Based on CoreOS
-===============================================
-
-You can create a Kubernetes cluster based on CoreOS as an alternative to
-Atomic. First, download the official CoreOS image::
-
-    wget http://beta.release.core-os.net/amd64-usr/current/coreos_production_openstack_image.img.bz2
-    bunzip2 coreos_production_openstack_image.img.bz2
-
-Upload the image to glance::
-
-    openstack image create CoreOS \
-                  --public \
-                  --disk-format=qcow2 \
-                  --container-format=bare \
-                  --property os_distro=coreos \
-                  --file=coreos_production_openstack_image.img
-
-Create a CoreOS Kubernetes ClusterTemplate, which is similar to the Atomic
-Kubernetes ClusterTemplate, except for pointing to a different image::
-
-    magnum cluster-template-create k8s-cluster-template-coreos \
-                           --image CoreOS \
-                           --keypair testkey \
-                           --external-network public \
-                           --dns-nameserver 8.8.8.8 \
-                           --flavor m1.small \
-                           --network-driver flannel \
-                           --coe kubernetes
-
-Create a CoreOS Kubernetes cluster. Use the CoreOS ClusterTemplate as a
-template for cluster creation::
-
-    magnum cluster-create k8s-cluster \
-                          --cluster-template k8s-cluster-template-coreos \
-                          --node-count 2
-
-Using a Kubernetes Cluster
-==========================
-
-**NOTE:** For the following examples, only one minion node is required in the
-k8s cluster created previously.
-
-Kubernetes provides a number of examples you can use to check that things are
-working. You may need to download the kubectl binary to interact with the k8s
-cluster::
-
-    curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.2.0/bin/linux/amd64/kubectl
-    chmod +x ./kubectl
-    sudo mv ./kubectl /usr/local/bin/kubectl
-
-We first need to set up the certs to allow Kubernetes to authenticate our
-connection. Please refer to
-``_
-for more info on using TLS keys/certs which are set up below.
-
-To generate an RSA key, you will use the 'genrsa' command of the 'openssl'
-tool::
-
-    openssl genrsa -out client.key 4096
-
-To generate a CSR for client authentication, openssl requires a config file
-that specifies a few values::
-
-    $ cat > client.conf << END
-    [req]
-    distinguished_name = req_distinguished_name
-    req_extensions     = req_ext
-    prompt = no
-    [req_distinguished_name]
-    CN = Your Name
-    [req_ext]
-    extendedKeyUsage = clientAuth
-    END
-
-Once you have client.conf, you can run the openssl 'req' command to generate
-the CSR::
-
-    openssl req -new -days 365 \
-        -config client.conf \
-        -key client.key \
-        -out client.csr
-
-Now that you have your client CSR, you can use the Magnum CLI to send it off
-to Magnum to get it signed and also download the signing cert::
-
-    magnum ca-sign --cluster k8s-cluster --csr client.csr > client.crt
-    magnum ca-show --cluster k8s-cluster > ca.crt
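-
-Optionally, you can inspect the signed certificate before using it, as a
-quick sanity check on the subject and validity period (an optional step,
-using plain openssl)::
-
-    openssl x509 -in client.crt -noout -subject -dates
-
-Here's how to set up the replicated redis example.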
Now we create a pod for the -redis-master:: - - # Using cluster-config command for faster configuration - eval $(magnum cluster-config k8s-cluster) - - # Test the cert and connection works - kubectl version - - cd kubernetes/examples/redis - kubectl create -f ./redis-master.yaml - -Now create a service to provide a discoverable endpoint for the redis -sentinels in the cluster:: - - kubectl create -f ./redis-sentinel-service.yaml - -To make it a replicated redis cluster create replication controllers for the -redis slaves and sentinels:: - - sed -i 's/\(replicas: \)1/\1 2/' redis-controller.yaml - kubectl create -f ./redis-controller.yaml - - sed -i 's/\(replicas: \)1/\1 2/' redis-sentinel-controller.yaml - kubectl create -f ./redis-sentinel-controller.yaml - -Full lifecycle and introspection operations for each object are supported. -For example, magnum cluster-create, magnum cluster-template-delete. - -Now there are four redis instances (one master and three slaves) running -across the cluster, replicating data between one another. - -Run the cluster-show command to get the IP of the cluster host on which the -redis-master is running:: - - $ magnum cluster-show k8s-cluster - - +--------------------+------------------------------------------------------------+ - | Property | Value | - +--------------------+------------------------------------------------------------+ - | status | CREATE_COMPLETE | - | uuid | cff82cd0-189c-4ede-a9cb-2c0af6997709 | - | stack_id | 7947844a-8e18-4c79-b591-ecf0f6067641 | - | status_reason | Stack CREATE completed successfully | - | created_at | 2016-05-26T17:45:57+00:00 | - | updated_at | 2016-05-26T17:50:02+00:00 | - | create_timeout | 60 | - | api_address | https://172.24.4.4:6443 | - | coe_version | v1.2.0 | - | cluster_template_id| e73298e7-e621-4d42-b35b-7a1952b97158 | - | master_addresses | ['172.24.4.6'] | - | node_count | 1 | - | node_addresses | ['172.24.4.5'] | - | master_count | 1 | - | container_version | 1.9.1 | - | discovery_url | https://discovery.etcd.io/4caaa65f297d4d49ef0a085a7aecf8e0 | - | name | k8s-cluster | - +--------------------+------------------------------------------------------------+ - -The output here indicates the redis-master is running on the cluster host with -IP address 172.24.4.5. To access the redis master:: - - $ ssh fedora@172.24.4.5 - $ REDIS_ID=$(sudo docker ps | grep redis:v1 | grep k8s_master | awk '{print $1}') - $ sudo docker exec -i -t $REDIS_ID redis-cli - - 127.0.0.1:6379> set replication:test true - OK - ^D - - $ exit # Log out of the host - -Log into one of the other container hosts and access a redis slave from it. -You can use `nova list` to enumerate the kube-minions. For this example we -will use the same host as above:: - - $ ssh fedora@172.24.4.5 - $ REDIS_ID=$(sudo docker ps | grep redis:v1 | grep k8s_redis | awk '{print $1}') - $ sudo docker exec -i -t $REDIS_ID redis-cli - - 127.0.0.1:6379> get replication:test - "true" - ^D - - $ exit # Log out of the host - -Additional useful commands from a given minion:: - - sudo docker ps # View Docker containers on this minion - kubectl get pods # Get pods - kubectl get rc # Get replication controllers - kubectl get svc # Get services - kubectl get nodes # Get nodes - -After you finish using the cluster, you want to delete it. A cluster can be -deleted as follows:: - - magnum cluster-delete k8s-cluster - -Building and Using a Swarm Cluster -================================== - -Create a ClusterTemplate. 
It is very similar to the Kubernetes ClusterTemplate,
-except for the absence of some Kubernetes-specific arguments and the use of
-'swarm' as the COE::
-
-    magnum cluster-template-create swarm-cluster-template \
-                           --image fedora-atomic-latest \
-                           --keypair testkey \
-                           --external-network public \
-                           --dns-nameserver 8.8.8.8 \
-                           --flavor m1.small \
-                           --docker-volume-size 5 \
-                           --coe swarm
-
-**NOTE:** If you are using Magnum behind a firewall, then refer
-to ``_
-
-Finally, create the cluster. Use the ClusterTemplate 'swarm-cluster-template'
-as a template for cluster creation. This cluster will result in one swarm
-manager node and two extra agent nodes::
-
-    magnum cluster-create swarm-cluster \
-                          --cluster-template swarm-cluster-template \
-                          --node-count 2
-
-Now that we have a swarm cluster we can start interacting with it::
-
-    $ magnum cluster-show swarm-cluster
-
-    +--------------------+------------------------------------------------------------+
-    | Property           | Value                                                      |
-    +--------------------+------------------------------------------------------------+
-    | status             | CREATE_COMPLETE                                            |
-    | uuid               | eda91c1e-6103-45d4-ab09-3f316310fa8e                       |
-    | stack_id           | 7947844a-8e18-4c79-b591-ecf0f6067641                       |
-    | status_reason      | Stack CREATE completed successfully                        |
-    | created_at         | 2015-04-20T19:05:27+00:00                                  |
-    | updated_at         | 2015-04-20T19:06:08+00:00                                  |
-    | create_timeout     | 60                                                         |
-    | api_address        | https://172.24.4.4:6443                                    |
-    | coe_version        | 1.2.5                                                      |
-    | cluster_template_id| e73298e7-e621-4d42-b35b-7a1952b97158                       |
-    | master_addresses   | ['172.24.4.6']                                             |
-    | node_count         | 2                                                          |
-    | node_addresses     | ['172.24.4.5']                                             |
-    | master_count       | 1                                                          |
-    | container_version  | 1.9.1                                                      |
-    | discovery_url      | https://discovery.etcd.io/4caaa65f297d4d49ef0a085a7aecf8e0 |
-    | name               | swarm-cluster                                              |
-    +--------------------+------------------------------------------------------------+
-
-We now need to set up the docker CLI to use the swarm cluster we have created
-with the appropriate credentials.
-
-Create a dir to store certs and cd into it. The `DOCKER_CERT_PATH` env
-variable is consumed by docker which expects ca.pem, key.pem and cert.pem to
-be in that directory::
-
-    export DOCKER_CERT_PATH=~/.docker
-    mkdir -p ${DOCKER_CERT_PATH}
-    cd ${DOCKER_CERT_PATH}
-
-Generate an RSA key::
-
-    openssl genrsa -out key.pem 4096
-
-Create openssl config to help generate a CSR::
-
-    $ cat > client.conf << END
-    [req]
-    distinguished_name = req_distinguished_name
-    req_extensions     = req_ext
-    prompt = no
-    [req_distinguished_name]
-    CN = Your Name
-    [req_ext]
-    extendedKeyUsage = clientAuth
-    END
-
-Run the openssl 'req' command to generate the CSR::
-
-    openssl req -new -days 365 \
-        -config client.conf \
-        -key key.pem \
-        -out client.csr
-
-Now that you have your client CSR use the Magnum CLI to get it signed and also
-download the signing cert::
-
-    magnum ca-sign --cluster swarm-cluster --csr client.csr > cert.pem
-    magnum ca-show --cluster swarm-cluster > ca.pem
-
-Set the CLI to use TLS. This env var is consumed by docker::
-
-    export DOCKER_TLS_VERIFY="1"
-
-Set the correct host to use, which is the public IP address of the swarm API
-server endpoint. This env var is consumed by docker::
-
-    export DOCKER_HOST=$(magnum cluster-show swarm-cluster | awk '/ api_address /{print substr($4,7)}')
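-
-Before running containers, it is worth confirming that the TLS setup works
-end to end. The plain `docker version` command contacts the daemon using the
-three environment variables set above, so a successful response from the
-server side means the certificates are in order (a quick optional check)::
-
-    docker version
-
-Next we will create a container in this swarm cluster.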
This container will ping
-the address 8.8.8.8 four times::
-
-    docker run --rm -it cirros:latest ping -c 4 8.8.8.8
-
-You should see output similar to::
-
-    PING 8.8.8.8 (8.8.8.8): 56 data bytes
-    64 bytes from 8.8.8.8: seq=0 ttl=40 time=25.513 ms
-    64 bytes from 8.8.8.8: seq=1 ttl=40 time=25.348 ms
-    64 bytes from 8.8.8.8: seq=2 ttl=40 time=25.226 ms
-    64 bytes from 8.8.8.8: seq=3 ttl=40 time=25.275 ms
-
-    --- 8.8.8.8 ping statistics ---
-    4 packets transmitted, 4 packets received, 0% packet loss
-    round-trip min/avg/max = 25.226/25.340/25.513 ms
-
-Building and Using a Mesos Cluster
-==================================
-
-Provisioning a mesos cluster requires an Ubuntu-based image with some packages
-pre-installed. To build and upload such an image, please refer to
-``_
-
-Alternatively, you can download and upload a pre-built image::
-
-    wget https://fedorapeople.org/groups/magnum/ubuntu-mesos-latest.qcow2
-    openstack image create ubuntu-mesos --public \
-                      --disk-format=qcow2 --container-format=bare \
-                      --property os_distro=ubuntu --file=ubuntu-mesos-latest.qcow2
-
-Then, create a ClusterTemplate by using 'mesos' as the COE, with the rest of
-the arguments similar to the Kubernetes ClusterTemplate::
-
-    magnum cluster-template-create mesos-cluster-template --image ubuntu-mesos \
-                           --keypair testkey \
-                           --external-network public \
-                           --dns-nameserver 8.8.8.8 \
-                           --flavor m1.small \
-                           --coe mesos
-
-Finally, create the cluster. Use the ClusterTemplate 'mesos-cluster-template'
-as a template for cluster creation. This cluster will result in one mesos
-master node and two mesos slave nodes::
-
-    magnum cluster-create mesos-cluster \
-                          --cluster-template mesos-cluster-template \
-                          --node-count 2
-
-Now that we have a mesos cluster we can start interacting with it. First we
-need to make sure the cluster's status is 'CREATE_COMPLETE'::
-
-    $ magnum cluster-show mesos-cluster
-
-    +--------------------+------------------------------------------------------------+
-    | Property           | Value                                                      |
-    +--------------------+------------------------------------------------------------+
-    | status             | CREATE_COMPLETE                                            |
-    | uuid               | ff727f0d-72ca-4e2b-9fef-5ec853d74fdf                       |
-    | stack_id           | 7947844a-8e18-4c79-b591-ecf0f6067641                       |
-    | status_reason      | Stack CREATE completed successfully                        |
-    | created_at         | 2015-06-09T20:21:43+00:00                                  |
-    | updated_at         | 2015-06-09T20:28:18+00:00                                  |
-    | create_timeout     | 60                                                         |
-    | api_address        | https://172.24.4.115:6443                                  |
-    | coe_version        | -                                                          |
-    | cluster_template_id| 92dbda62-32d4-4435-88fc-8f42d514b347                       |
-    | master_addresses   | ['172.24.4.115']                                           |
-    | node_count         | 2                                                          |
-    | node_addresses     | ['172.24.4.116', '172.24.4.117']                           |
-    | master_count       | 1                                                          |
-    | container_version  | 1.9.1                                                      |
-    | discovery_url      | None                                                       |
-    | name               | mesos-cluster                                              |
-    +--------------------+------------------------------------------------------------+
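-
-Before posting any application, you can confirm that Marathon is answering on
-the master node. Its `/v2/info` endpoint returns basic version information (a
-quick optional check; 172.24.4.115 is the example master address shown
-above)::
-
-    curl http://172.24.4.115:8080/v2/info
-
-Next we will create a container in this cluster by using the REST API of
-Marathon.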
This container will ping
-the address 8.8.8.8::
-
-    $ cat > mesos.json << END
-    {
-      "container": {
-        "type": "DOCKER",
-        "docker": {
-          "image": "cirros"
-        }
-      },
-      "id": "ubuntu",
-      "instances": 1,
-      "cpus": 0.5,
-      "mem": 512,
-      "uris": [],
-      "cmd": "ping 8.8.8.8"
-    }
-    END
-    $ MASTER_IP=$(magnum cluster-show mesos-cluster | awk '/ master_addresses /{print $4}' | tr -d "[]'")
-    $ curl -X POST -H "Content-Type: application/json" \
-          http://${MASTER_IP}:8080/v2/apps -d@mesos.json
-
-To check application and task status::
-
-    $ curl http://${MASTER_IP}:8080/v2/apps
-    $ curl http://${MASTER_IP}:8080/v2/tasks
-
-You can access the Mesos web page at \http://<MASTER_IP>:5050/ and the
-Marathon web console at \http://<MASTER_IP>:8080/.
-
-Building Developer Documentation
-================================
-
-To build the documentation locally (e.g., to test documentation changes
-before uploading them for review) chdir to the magnum root folder and
-run tox::
-
-    tox -edocs
-
-**NOTE:** The first time you run this, it will take some extra time as it
-creates a virtual environment to run in.
-
-When complete, the documentation can be accessed from::
-
-    doc/build/html/index.html
diff --git a/doc/source/contributor/reno.rst b/doc/source/contributor/reno.rst
deleted file mode 100644
index 98bb4eb4..00000000
--- a/doc/source/contributor/reno.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-Release Notes
-=============
-
-What is reno?
---------------
-
-Magnum uses `reno `_ for
-providing release notes in-tree. That means that a patch can include a *reno
-file* or a series can have a follow-on change containing that file explaining
-what the impact is.
-
-A *reno file* is a YAML file written in the releasenotes/notes tree which is
-generated using the reno tool this way:
-
-.. code-block:: bash
-
-    $ tox -e venv -- reno new <slug>
-
-where usually ``<slug>`` can be ``bp-<blueprint-name>`` for a
-blueprint or ``bug-XXXXXX`` for a bugfix.
-
-Refer to the `reno documentation `_
-for the full list of sections.
-
-
-When a release note is needed
------------------------------
-
-A release note is required anytime a reno section is needed. Below are some
-examples for each section. Any sections that would be blank should be left out
-of the note file entirely. If no section is needed, then you know you don't
-need to provide a release note :-)
-
-* ``upgrade``
-    * The patch has an `UpgradeImpact `_ tag
-    * A DB change needs some deployer modification (like a migration)
-    * A configuration option change (deprecation, removal or modified default)
-    * some specific changes that have a `DocImpact `_ tag
-      but require further action from a deployer perspective
-    * any patch that requires an action from the deployer in general
-
-* ``security``
-    * If the patch fixes a known vulnerability
-
-* ``features``
-    * If the patch has an `APIImpact `_ tag
-
-* ``critical``
-    * Bugfixes categorized as Critical in Launchpad *impacting users*
-
-* ``fixes``
-    * No clear definition of such bugfixes. Hairy long-standing bugs with high
-      importance that have been fixed are good candidates though.
-
-
-Three sections are left intentionally unexplained (``prelude``, ``issues`` and
-``other``). Those are targeted to be filled in close to the release time for
-providing details about the soon-ish release. Don't use them unless you know
-exactly what you are doing.
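-
-For illustration, here is what a hypothetical bugfix note might look like.
-The section keys are the standard reno ones discussed above; the wording of
-the entries is invented for the example:
-
-.. code-block:: yaml
-
-    ---
-    fixes:
-      - |
-        Describe the user-visible impact of the bugfix here, for example
-        that cluster deletion no longer hangs when the Heat stack is gone.
-    upgrade:
-      - |
-        Note anything the deployer must do when rolling out this change.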
diff --git a/doc/source/images/MagnumVolumeIntegration.png b/doc/source/images/MagnumVolumeIntegration.png
deleted file mode 100644
index 09a1378f..00000000
Binary files a/doc/source/images/MagnumVolumeIntegration.png and /dev/null differ
diff --git a/doc/source/images/cluster-create.png b/doc/source/images/cluster-create.png
deleted file mode 100644
index 31cf6308..00000000
Binary files a/doc/source/images/cluster-create.png and /dev/null differ
diff --git a/doc/source/images/cluster-template-details.png b/doc/source/images/cluster-template-details.png
deleted file mode 100644
index e7c75d47..00000000
Binary files a/doc/source/images/cluster-template-details.png and /dev/null differ
diff --git a/doc/source/images/cluster-template.png b/doc/source/images/cluster-template.png
deleted file mode 100644
index a424c7bc..00000000
Binary files a/doc/source/images/cluster-template.png and /dev/null differ
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 100644
index 2dc125d3..00000000
--- a/doc/source/index.rst
+++ /dev/null
@@ -1,101 +0,0 @@
-..
-      Copyright 2014-2015 OpenStack Foundation
-      All Rights Reserved.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-============================================
-Welcome to Magnum's Developer Documentation!
-============================================
-
-Magnum is an OpenStack project which offers container orchestration engines
-for deploying and managing containers as first class resources in OpenStack.
-
-* **Free software:** under the `Apache license `_
-* **Source:** http://git.openstack.org/cgit/openstack/magnum
-* **Blueprints:** https://blueprints.launchpad.net/magnum
-* **Bugs:** http://bugs.launchpad.net/magnum
-* **REST Client:** http://git.openstack.org/cgit/openstack/python-magnumclient
-
-Architecture
-============
-
-There are several different types of objects in the magnum system:
-
-* **Cluster:** A collection of node objects where work is scheduled
-* **ClusterTemplate:** An object that stores template information about the
-  cluster, which is used to create new clusters consistently
-
-Two binaries work together to compose the magnum system. The first binary
-(accessed by the python-magnumclient code) is the magnum-api REST server. The
-REST server may run as one process or multiple processes. When a REST request
-is sent to the client API, the request is sent via AMQP to the
-magnum-conductor process. The REST server is horizontally scalable. At this
-time, the conductor is limited to one process, but we intend to add horizontal
-scalability to the conductor as well.
-
-Features
-========
-
-* Abstractions for Clusters
-* Integration with Kubernetes, Swarm, Mesos for backend container technology
-* Integration with Keystone for multi-tenant security
-* Integration with Neutron for Kubernetes multi-tenancy network security
-* Integration with Cinder to provide volume service for containers
-
-User Documentation
-==================
-.. toctree::
-   :maxdepth: 1
-
-   user/index
-
-Contributor Guide
-=================
-.. toctree::
-   :maxdepth: 1
-
-   contributor/index
-
-Admin Guide
-===========
-.. toctree::
-   :maxdepth: 1
-
-   admin/index
-
-Installation Guide
-==================
-.. toctree::
-   :maxdepth: 1
-
-   install/index
-
-
-Sample Configurations
-=====================
-
-.. toctree::
-   :maxdepth: 1
-
-   configuration/index
-
-Work In Progress
-================
-
-.. toctree::
-   :maxdepth: 1
-
-   admin/troubleshooting-guide.rst
-   user/index.rst
-   admin/configuring.rst
diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst
deleted file mode 100644
index 82fd44e6..00000000
--- a/doc/source/install/index.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-=========================
-Magnum Installation Guide
-=========================
-
-The Container Infrastructure Management service is a collection of components
-that provides support to manage different container orchestration engines
-(COE) like Kubernetes, Mesos and Swarm.
-
-This chapter assumes a working setup of OpenStack following the
-`OpenStack Installation Guides `_.
-It is recommended to follow the `Container Infrastructure Management Service
-`_
-section from the OpenStack Installation Tutorials. This chapter contains the
-following sections:
-
-.. toctree::
-   :maxdepth: 1
-
-   install-guide-from-source
diff --git a/doc/source/install/install-guide-from-source.rst b/doc/source/install/install-guide-from-source.rst
deleted file mode 100644
index d40b4c5e..00000000
--- a/doc/source/install/install-guide-from-source.rst
+++ /dev/null
@@ -1,617 +0,0 @@
-.. _install:
-
-===================================================================
-Install the Container Infrastructure Management service from source
-===================================================================
-
-Install and configure
-~~~~~~~~~~~~~~~~~~~~~
-
-This section describes how to install and configure the Container
-Infrastructure Management service, code-named magnum, on the controller node.
-
-This section assumes that you already have a working OpenStack environment with
-at least the following components installed: Identity service, Image service,
-Compute service, Networking service, Block Storage service and Orchestration
-service. See `OpenStack Install Guides `__.
-
-To provide access to Docker Swarm or Kubernetes using the native clients
-(docker or kubectl respectively) magnum uses TLS certificates. To store the
-certificates, it is recommended to use the `Key Manager service, code-named
-barbican `__, or you can save them in magnum's database.
-
-Optionally, you can install the following components:
-
-- `Load Balancer as a Service (LBaaS v2) `__ to create clusters with multiple
-  masters
-- `Bare Metal service `__ to create baremetal clusters
-- `Object Storage service `__ to make private Docker registries available to
-  users
-- `Telemetry Data Collection service `__ to periodically send
-  magnum-related metrics
-
-.. important::
-
-   Magnum creates clusters of compute instances on the Compute service (nova).
-   These instances must have basic Internet connectivity and must be able to
-   reach magnum's API server. Make sure that the Compute and Network services
-   are configured accordingly.
-
-Prerequisites
--------------
-
-Before you install and configure the Container Infrastructure Management
-service, you must create a database, service credentials, and API endpoints.
-
-#. To create the database, complete these steps:
-
-   * Use the database access client to connect to the database
-     server as the ``root`` user:
-
-     .. 
code-block:: console - - $ mysql -u root -p - - * Create the ``magnum`` database: - - .. code-block:: console - - CREATE DATABASE magnum; - - * Grant proper access to the ``magnum`` database: - - .. code-block:: console - - GRANT ALL PRIVILEGES ON magnum.* TO 'magnum'@'controller' \ - IDENTIFIED BY 'MAGNUM_DBPASS'; - GRANT ALL PRIVILEGES ON magnum.* TO 'magnum'@'%' \ - IDENTIFIED BY 'MAGNUM_DBPASS'; - - Replace ``MAGNUM_DBPASS`` with a suitable password. - - * Exit the database access client. - -#. Source the ``admin`` credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - -#. To create the service credentials, complete these steps: - - * Create the ``magnum`` user: - - .. code-block:: console - - - $ openstack user create --domain default \ - --password-prompt magnum - User Password: - Repeat User Password: - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | a8ebafc275c54d389dfc1bff8b4fe286 | - | name | magnum | - +-----------+----------------------------------+ - - * Add the ``admin`` role to the ``magnum`` user: - - .. code-block:: console - - $ openstack role add --project service --user magnum admin - - .. note:: - - This command provides no output. - - * Create the ``magnum`` service entity: - - .. code-block:: console - - $ openstack service create --name magnum \ - --description "OpenStack Container Infrastructure Management Service" \ - container-infra - +-------------+-------------------------------------------------------+ - | Field | Value | - +-------------+-------------------------------------------------------+ - | description | OpenStack Container Infrastructure Management Service | - | enabled | True | - | id | 194faf83e8fd4e028e5ff75d3d8d0df2 | - | name | magnum | - | type | container-infra | - +-------------+-------------------------------------------------------+ - -#. Create the Container Infrastructure Management service API endpoints: - - .. 
code-block:: console - - $ openstack endpoint create --region RegionOne \ - container-infra public http://CONTROLLER_IP:9511/v1 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | cb137e6366ad495bb521cfe92d8b8858 | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 0f7f62a1f1a247d2a4cb237642814d0e | - | service_name | magnum | - | service_type | container-infra | - | url | http://CONTROLLER_IP:9511/v1 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - container-infra internal http://CONTROLLER_IP:9511/v1 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 17cbc3b6f51449a0a818118d6d62868d | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 0f7f62a1f1a247d2a4cb237642814d0e | - | service_name | magnum | - | service_type | container-infra | - | url | http://CONTROLLER_IP:9511/v1 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - container-infra admin http://CONTROLLER_IP:9511/v1 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 30f8888e6b6646d7b5cd14354c95a684 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 0f7f62a1f1a247d2a4cb237642814d0e | - | service_name | magnum | - | service_type | container-infra | - | url | http://CONTROLLER_IP:9511/v1 | - +--------------+----------------------------------+ - - Replace ``CONTROLLER_IP`` with the IP magnum listens to. Alternatively, - you can use a hostname which is reachable by the Compute instances. - -#. Magnum requires additional information in the Identity service to - manage clusters. To add this information, complete these steps: - - * Create the ``magnum`` domain that contains projects and users: - - .. code-block:: console - - $ openstack domain create --description "Owns users and projects \ - created by magnum" magnum - +-------------+-------------------------------------------+ - | Field | Value | - +-------------+-------------------------------------------+ - | description | Owns users and projects created by magnum | - | enabled | True | - | id | 66e0469de9c04eda9bc368e001676d20 | - | name | magnum | - +-------------+-------------------------------------------+ - - * Create the ``magnum_domain_admin`` user to manage projects and users - in the ``magnum`` domain: - - .. code-block:: console - - $ openstack user create --domain magnum --password-prompt \ - magnum_domain_admin - User Password: - Repeat User Password: - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | 66e0469de9c04eda9bc368e001676d20 | - | enabled | True | - | id | 529b81cf35094beb9784c6d06c090c2b | - | name | magnum_domain_admin | - +-----------+----------------------------------+ - - * Add the ``admin`` role to the ``magnum_domain_admin`` user in the - ``magnum`` domain to enable administrative management privileges - by the ``magnum_domain_admin`` user: - - .. code-block:: console - - $ openstack role add --domain magnum --user-domain magnum \ - --user magnum_domain_admin admin - - .. 
note:: - - This command provides no output. - -Install and configure components --------------------------------- - -#. Install OS-specific prerequisites: - - * Ubuntu 14.04 (trusty) or higher, Debian 8: - - .. code-block:: console - - # apt-get update - # apt-get install python-dev libssl-dev libxml2-dev \ - libmysqlclient-dev libxslt-dev libpq-dev git \ - libffi-dev gettext build-essential - - * Fedora 21 / Centos 7 / RHEL 7 - - .. code-block:: console - - # yum install python-devel openssl-devel mysql-devel \ - libxml2-devel libxslt-devel postgresql-devel git \ - libffi-devel gettext gcc - - * Fedora 22 or higher - - .. code-block:: console - - # dnf install python-devel openssl-devel mysql-devel \ - libxml2-devel libxslt-devel postgresql-devel git \ - libffi-devel gettext gcc - - * openSUSE Leap 42.1 - - .. code-block:: console - - # zypper install git libffi-devel libmysqlclient-devel \ - libopenssl-devel libxml2-devel libxslt-devel \ - postgresql-devel python-devel gettext-runtime gcc - -2. Create magnum user and necessary directories: - - * Create user: - - .. code-block:: console - - # groupadd --system magnum - # useradd --home-dir "/var/lib/magnum" \ - --create-home \ - --system \ - --shell /bin/false \ - -g magnum \ - magnum - - * Create directories: - - .. code-block:: console - - # mkdir -p /var/log/magnum - # mkdir -p /etc/magnum - - * Set ownership to directories: - - .. code-block:: console - - # chown magnum:magnum /var/log/magnum - # chown magnum:magnum /var/lib/magnum - # chown magnum:magnum /etc/magnum - -3. Install virtualenv and python prerequisites: - - * Install virtualenv and create one for magnum's installation: - - .. code-block:: console - - # easy_install -U virtualenv - # su -s /bin/sh -c "virtualenv /var/lib/magnum/env" magnum - - * Install python prerequisites: - - .. code-block:: console - - # su -s /bin/sh -c "/var/lib/magnum/env/bin/pip install tox pymysql \ - python-memcached" magnum - -4. Clone and install magnum: - - .. code-block:: console - - # cd /var/lib/magnum - # git clone https://git.openstack.org/openstack/magnum.git - # chown -R magnum:magnum magnum - # cd magnum - # su -s /bin/sh -c "/var/lib/magnum/env/bin/pip install -r requirements.txt" magnum - # su -s /bin/sh -c "/var/lib/magnum/env/bin/python setup.py install" magnum - -5. Copy policy.json and api-paste.ini: - - .. code-block:: console - - # su -s /bin/sh -c "cp etc/magnum/policy.json /etc/magnum" magnum - # su -s /bin/sh -c "cp etc/magnum/api-paste.ini /etc/magnum" magnum - -6. Generate a sample configuration file: - - .. code-block:: console - - # su -s /bin/sh -c "/var/lib/magnum/env/bin/tox -e genconfig" magnum - # su -s /bin/sh -c "cp etc/magnum/magnum.conf.sample \ - /etc/magnum/magnum.conf" magnum - -7. Edit the ``/etc/magnum/magnum.conf``: - - * In the ``[DEFAULT]`` section, - configure ``RabbitMQ`` message queue access: - - .. code-block:: ini - - [DEFAULT] - ... - transport_url = rabbit://openstack:RABBIT_PASS@controller - - Replace ``RABBIT_PASS`` with the password you chose for the - ``openstack`` account in ``RabbitMQ``. - - * In the ``[api]`` section, configure the host: - - .. code-block:: ini - - [api] - ... - host = CONTROLLER_IP - - Replace ``CONTROLLER_IP`` with the IP address on which you wish magnum api - should listen. - - * In the ``[certificates]`` section, select ``barbican`` (or ``local`` if - you don't have barbican installed): - - * Use barbican to store certificates: - - .. code-block:: ini - - [certificates] - ... - cert_manager_type = barbican - - .. 
important::
-
-        Barbican is recommended for production environments; the local store
-        should be used only for evaluation purposes.
-
-   * To use local store for certificates, you have to create and specify the
-     directory to use:
-
-     .. code-block:: console
-
-        # su -s /bin/sh -c "mkdir -p /var/lib/magnum/certificates/" magnum
-
-     .. code-block:: ini
-
-        [certificates]
-        ...
-        cert_manager_type = local
-        storage_path = /var/lib/magnum/certificates/
-
-   * In the ``[cinder_client]`` section, configure the region name:
-
-     .. code-block:: ini
-
-        [cinder_client]
-        ...
-        region_name = RegionOne
-
-   * In the ``[database]`` section, configure database access:
-
-     .. code-block:: ini
-
-        [database]
-        ...
-        connection = mysql+pymysql://magnum:MAGNUM_DBPASS@controller/magnum
-
-     Replace ``MAGNUM_DBPASS`` with the password you chose for
-     the magnum database.
-
-   * In the ``[keystone_authtoken]`` and ``trust`` sections, configure
-     Identity service access:
-
-     .. code-block:: ini
-
-        [keystone_authtoken]
-        ...
-        memcached_servers = controller:11211
-        auth_version = v3
-        auth_uri = http://controller:5000/v3
-        project_domain_name = default
-        project_name = service
-        user_domain_name = default
-        password = MAGNUM_PASS
-        username = magnum
-        auth_url = http://controller:35357
-        auth_type = password
-
-        [trust]
-        ...
-        trustee_domain_name = magnum
-        trustee_domain_admin_name = magnum_domain_admin
-        trustee_domain_admin_password = DOMAIN_ADMIN_PASS
-
-     ``trustee_domain_name`` is the name of the ``magnum`` domain and
-     ``trustee_domain_admin_name`` is the name of the ``magnum_domain_admin``
-     user. Replace MAGNUM_PASS with the password you chose for the magnum
-     user in the Identity service and DOMAIN_ADMIN_PASS with the password
-     you chose for the ``magnum_domain_admin`` user.
-
-   * In the ``[oslo_concurrency]`` section, configure the ``lock_path``:
-
-     .. code-block:: ini
-
-        [oslo_concurrency]
-        ...
-        lock_path = /var/lib/magnum/tmp
-
-   * In the ``[oslo_messaging_notifications]`` section, configure the
-     ``driver``:
-
-     .. code-block:: ini
-
-        [oslo_messaging_notifications]
-        ...
-        driver = messaging
-
-   .. note::
-
-      Make sure that ``/etc/magnum/magnum.conf`` still has the correct
-      permissions. You can set the permissions again with:
-
-      # chown magnum:magnum /etc/magnum/magnum.conf
-
-8. Populate Magnum database:
-
-   .. code-block:: console
-
-      # su -s /bin/sh -c "/var/lib/magnum/env/bin/magnum-db-manage upgrade" magnum
-
-9. Set magnum for log rotation:
-
-   .. code-block:: console
-
-      # cd /var/lib/magnum/magnum
-      # cp doc/examples/etc/logrotate.d/magnum.logrotate /etc/logrotate.d/magnum
-
-Finalize installation
----------------------
-
-#. Create init scripts and services:
-
-   * Ubuntu 14.04 (trusty):
-
-     .. code-block:: console
-
-        # cd /var/lib/magnum/magnum
-        # cp doc/examples/etc/init/magnum-api.conf \
-          /etc/init/magnum-api.conf
-        # cp doc/examples/etc/init/magnum-conductor.conf \
-          /etc/init/magnum-conductor.conf
-
-   * Ubuntu 14.10 or higher, Fedora 21 or higher/RHEL 7/CentOS 7, openSUSE
-     Leap 42.1 or Debian 8:
-
-     .. code-block:: console
-
-        # cd /var/lib/magnum/magnum
-        # cp doc/examples/etc/systemd/system/magnum-api.service \
-          /etc/systemd/system/magnum-api.service
-        # cp doc/examples/etc/systemd/system/magnum-conductor.service \
-          /etc/systemd/system/magnum-conductor.service
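-
-   .. note::
-
-      On systemd-based distributions it does not hurt to reload unit files
-      after copying them, so systemd picks up the new services (an optional
-      housekeeping step):
-
-      .. code-block:: console
-
-         # systemctl daemon-reload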
-#. Start magnum-api and magnum-conductor:
-
-   * Ubuntu 14.04 (trusty):
-
-     .. code-block:: console
-
-        # start magnum-api
-        # start magnum-conductor
-
-   * Ubuntu 14.10 or higher, Fedora 21 or higher/RHEL 7/CentOS 7, openSUSE
-     Leap 42.1 or Debian 8:
-
-     .. code-block:: console
-
-        # systemctl enable magnum-api
-        # systemctl enable magnum-conductor
-
-     .. code-block:: console
-
-        # systemctl start magnum-api
-        # systemctl start magnum-conductor
-
-#. Verify that magnum-api and magnum-conductor services are running:
-
-   * Ubuntu 14.04 (trusty):
-
-     .. code-block:: console
-
-        # status magnum-api
-        # status magnum-conductor
-
-   * Ubuntu 14.10 or higher, Fedora 21 or higher/RHEL 7/CentOS 7, openSUSE
-     Leap 42.1 or Debian 8:
-
-     .. code-block:: console
-
-        # systemctl status magnum-api
-        # systemctl status magnum-conductor
-
-Install the command-line client
--------------------------------
-
-#. Install OS-specific prerequisites:
-
-   * Fedora 21/RHEL 7/CentOS 7
-
-     .. code-block:: console
-
-        # yum install python-devel openssl-devel python-virtualenv \
-                      libffi-devel git gcc
-
-   * Fedora 22 or higher
-
-     .. code-block:: console
-
-        # dnf install python-devel openssl-devel python-virtualenv \
-                      libffi-devel git gcc
-
-   * Ubuntu/Debian
-
-     .. code-block:: console
-
-        # apt-get update
-        # apt-get install python-dev libssl-dev python-virtualenv \
-                          libffi-dev git gcc
-
-   * openSUSE Leap 42.1
-
-     .. code-block:: console
-
-        # zypper install python-devel libopenssl-devel python-virtualenv \
-                         libffi-devel git gcc
-
-#. Install the client in a virtual environment:
-
-   .. code-block:: console
-
-      $ cd ~
-      $ git clone https://git.openstack.org/openstack/python-magnumclient.git
-      $ cd python-magnumclient
-      $ virtualenv .magnumclient-env
-      $ .magnumclient-env/bin/pip install -r requirements.txt
-      $ .magnumclient-env/bin/python setup.py install
-
-#. Now, you can add the client's directory to your PATH:
-
-   .. code-block:: console
-
-      $ export PATH=$PATH:${PWD}/.magnumclient-env/bin
-
-   .. note::
-
-      The command-line client can be installed on the controller node or
-      on a different host than the service. It is good practice to install it
-      as a non-root user.
-
-Next Steps
-----------
-
-Since you have the Container Infrastructure Management service running, you
-can `Verify Operation `__ and `Launch an
-instance `__.
diff --git a/doc/source/user/cluster-type-definition.rst b/doc/source/user/cluster-type-definition.rst
deleted file mode 100644
index 4904dc83..00000000
--- a/doc/source/user/cluster-type-definition.rst
+++ /dev/null
@@ -1,105 +0,0 @@
-There are three key pieces to a Cluster Type Definition:
-
-1. Heat Stack template - The HOT file that Magnum will use to generate a
-   cluster using a Heat Stack.
-2. Template definition - Magnum's interface for interacting with the Heat
-   template.
-3. Definition Entry Point - Used to advertise the available Cluster Types.
-
-The Heat Stack Template
------------------------
-
-The Heat Stack Template is where most of the real work happens. The result of
-the Heat Stack Template should be a full Container Orchestration Environment.
-
-The Template Definition
------------------------
-
-Template definitions are a mapping of Magnum object attributes and Heat
-template parameters, along with Magnum consumable template outputs. A
-Cluster Type Definition indicates which Cluster Types it can provide.
-Cluster Types are how Magnum determines which of the enabled Cluster
-Type Definitions it will use for a given cluster.
-
-The Definition Entry Point
---------------------------
-
-Entry points are a standard discovery and import mechanism for Python objects.
-Each Template Definition should have an Entry Point in the
-`magnum.template_definitions` group. This example exposes its Template
-Definition as `example_template = example_template:ExampleTemplate` in the
-`magnum.template_definitions` group.
-
-Installing Cluster Templates
-----------------------------
-
-Because Cluster Type Definitions are basically Python projects, they can be
-worked with like any other Python project. They can be cloned from version
-control and installed or uploaded to a package index and installed via
-utilities such as pip.
-
-Enabling a Cluster Type is as simple as adding its Entry Point to the
-`enabled_definitions` config option in magnum.conf::
-
-    # Setup python environment and install Magnum
-
-    $ virtualenv .venv
-    $ source .venv/bin/activate
-    (.venv)$ git clone https://github.com/openstack/magnum.git
-    (.venv)$ cd magnum
-    (.venv)$ python setup.py install
-
-    # List installed templates, notice default templates are enabled
-
-    (.venv)$ magnum-template-manage list-templates
-    Enabled Templates
-      magnum_vm_atomic_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster.yaml
-      magnum_vm_coreos_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster-coreos.yaml
-    Disabled Templates
-
-    # Install example template
-
-    (.venv)$ cd contrib/templates/example
-    (.venv)$ python setup.py install
-
-    # List installed templates, notice example template is disabled
-
-    (.venv)$ magnum-template-manage list-templates
-    Enabled Templates
-      magnum_vm_atomic_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster.yaml
-      magnum_vm_coreos_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster-coreos.yaml
-    Disabled Templates
-      example_template: /home/example/.venv/local/lib/python2.7/site-packages/ExampleTemplate-0.1-py2.7.egg/example_template/example.yaml
-
-    # Enable example template by setting enabled_definitions in magnum.conf
-
-    (.venv)$ sudo mkdir /etc/magnum
-    (.venv)$ sudo bash -c "cat > /etc/magnum/magnum.conf << END_CONF
-    [bay]
-    enabled_definitions=magnum_vm_atomic_k8s,magnum_vm_coreos_k8s,example_template
-    END_CONF"
-
-    # List installed templates, notice example template is now enabled
-
-    (.venv)$ magnum-template-manage list-templates
-    Enabled Templates
-      example_template: /home/example/.venv/local/lib/python2.7/site-packages/ExampleTemplate-0.1-py2.7.egg/example_template/example.yaml
-      magnum_vm_atomic_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster.yaml
-      magnum_vm_coreos_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster-coreos.yaml
-    Disabled Templates
-
-    # Use --details argument to get more details about each template
-
-    (.venv)$ magnum-template-manage list-templates --details
-    Enabled Templates
-      example_template: /home/example/.venv/local/lib/python2.7/site-packages/ExampleTemplate-0.1-py2.7.egg/example_template/example.yaml
-           Server_Type  OS             CoE
-           vm           example        example_coe
-      magnum_vm_atomic_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster.yaml
-           Server_Type  OS             CoE
-           vm           fedora-atomic  kubernetes
-      magnum_vm_coreos_k8s: /home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster-coreos.yaml
-           Server_Type  OS             CoE
-           vm           coreos         kubernetes
-    Disabled Templates
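-
-For reference, the entry point shown above lives in the example project's
-packaging metadata. A minimal sketch of the relevant setup.cfg section
-(assuming a standard setuptools/pbr layout for the example project) is::
-
-    [entry_points]
-    magnum.template_definitions =
-        example_template = example_template:ExampleTemplate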
/home/example/.venv/local/lib/python2.7/site-packages/magnum/templates/kubernetes/kubecluster-coreos.yaml - Server_Type OS CoE - vm coreos kubernetes - Disabled Templates - diff --git a/doc/source/user/heat-templates.rst b/doc/source/user/heat-templates.rst deleted file mode 100644 index d12c4435..00000000 --- a/doc/source/user/heat-templates.rst +++ /dev/null @@ -1,10 +0,0 @@ -Heat Stack Templates are what Magnum passes to Heat to generate a cluster. For -each ClusterTemplate resource in Magnum, a Heat stack is created to arrange all -of the cloud resources needed to support the container orchestration -environment. These Heat stack templates provide a mapping of Magnum object -attributes to Heat template parameters, along with Magnum consumable stack -outputs. Magnum passes the Heat Stack Template to the Heat service to create a -Heat stack. The result is a full Container Orchestration Environment. - -.. list-plugins:: magnum.template_definitions - :detailed: diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst deleted file mode 100644 index f27eabf8..00000000 --- a/doc/source/user/index.rst +++ /dev/null @@ -1,2817 +0,0 @@ -================= -Magnum User Guide -================= - -This guide is intended for users who use Magnum to deploy and manage clusters -of hosts for a Container Orchestration Engine. It describes the infrastructure -that Magnum creates and how to work with it. - -Sections 1-3 describe Magnum itself, including an overview, the CLI and -Horizon interface. Sections 4-9 describe the Container Orchestration -Engines (COEs) supported, along with a guide on how to select one that -best meets your needs and how to develop a driver for a new COE. -Sections 10-15 describe the low-level OpenStack infrastructure that is -created and managed by Magnum to support the COEs. - -======== -Contents -======== - -#. `Overview`_ -#. `Python Client`_ -#. `Horizon Interface`_ -#. `Cluster Drivers`_ -#. `Cluster Type Definition`_ -#. `Heat Stack Templates`_ -#. `Choosing a COE`_ -#. `Native Clients`_ -#. `Kubernetes`_ -#. `Swarm`_ -#. `Mesos`_ -#. `Transport Layer Security`_ -#. `Networking`_ -#. `High Availability`_ -#. `Scaling`_ -#. `Storage`_ -#. `Image Management`_ -#. `Notification`_ -#. `Container Monitoring`_ -#. `Kubernetes External Load Balancer`_ - -=========== -Terminology -=========== - -Cluster (previously Bay) - A cluster is the construct in which Magnum launches container orchestration - engines. After a cluster has been created, the user is able to add containers - to it either directly, or in the case of the Kubernetes container - orchestration engine within pods - a logical construct specific to that - implementation. A cluster is created based on a ClusterTemplate. - -ClusterTemplate (previously BayModel) - A ClusterTemplate in Magnum is roughly equivalent to a flavor in Nova. It - acts as a template that defines options such as the container orchestration - engine, keypair and image for use when Magnum is creating clusters using - the given ClusterTemplate. - -Container Orchestration Engine (COE) - A container orchestration engine manages the lifecycle of one or more - containers, logically represented in Magnum as a cluster. Magnum supports a - number of container orchestration engines, each with its own pros and cons, - including Docker Swarm, Kubernetes, and Mesos.
- -======== -Overview -======== - -Magnum is an OpenStack API service developed by the OpenStack Containers Team -making container orchestration engines (COE) such as Docker Swarm, Kubernetes -and Apache Mesos available as first-class resources in OpenStack. - -Magnum uses Heat to orchestrate an OS image which contains Docker and a COE, -and runs that image on either virtual machines or bare metal in a cluster -configuration. - -Magnum offers complete life-cycle management of COEs in an -OpenStack environment, integrated with other OpenStack services for a seamless -experience for OpenStack users who wish to run containers in an OpenStack -environment. - -Following are a few salient features of Magnum: - -- Standard API based complete life-cycle management for Container Clusters -- Multi-tenancy for container clusters -- Choice of COE: Kubernetes, Swarm, Mesos, DC/OS -- Choice of container cluster deployment model: VM or Bare-metal -- Keystone-based multi-tenant security and auth management -- Neutron based multi-tenant network control and isolation -- Cinder based volume service for containers -- Integrated with OpenStack: SSO experience for cloud users -- Secure container cluster access (TLS enabled) - -More details: `Magnum Project Wiki `_ - -=============== -ClusterTemplate -=============== - -A ClusterTemplate (previously known as BayModel) is a collection of parameters -to describe how a cluster can be constructed. Some parameters are relevant to -the infrastructure of the cluster, while others are for the particular COE. In -a typical workflow, a user would create a ClusterTemplate, then create one or -more clusters using the ClusterTemplate. A cloud provider can also define a -number of ClusterTemplates and provide them to the users. A ClusterTemplate -cannot be updated or deleted if a cluster using this ClusterTemplate still -exists. - -The definition and usage of the parameters of a ClusterTemplate are as follows. -They are loosely grouped as: mandatory, infrastructure, COE-specific. - -\<name\> - Name of the ClusterTemplate to create. The name does not have to be - unique. If multiple ClusterTemplates have the same name, you will need to - use the UUID to select the ClusterTemplate when creating a cluster, or when - updating or deleting a ClusterTemplate. If a name is not specified, a random - name will be generated using a string and a number, for example - "pi-13-model". - ---coe \<coe\> - Specify the Container Orchestration Engine to use. Supported - COEs include 'kubernetes', 'swarm', 'mesos'. If your environment - has additional cluster drivers installed, refer to the cluster driver - documentation for the new COE names. This is a mandatory parameter - and there is no default value. - ---image \<image\> - The name or UUID of the base image in Glance to boot the servers for - the cluster. The image must have the attribute 'os_distro' defined - as appropriate for the cluster driver. For the currently supported - images, the os_distro names are: - - ========== ===================== - COE os-distro - ========== ===================== - Kubernetes Fedora-atomic, CoreOS - Swarm Fedora-atomic - Mesos Ubuntu - ========== ===================== - - This is a mandatory parameter and there is no default value. - ---keypair \<keypair\> - The name of the SSH keypair to configure in the cluster servers - for ssh access. You will need the key to be able to ssh to the - servers in the cluster. The login name is specific to the cluster - driver. If a keypair is not provided in the template, it will be required at - Cluster create.
This value will be overridden by any keypair value that - is provided during Cluster create. - ---external-network \<external-network\> - The name or network ID of a Neutron network to provide connectivity - to the external internet for the cluster. This network must be an - external network, i.e. its attribute 'router:external' must be - 'True'. The servers in the cluster will be connected to a private - network and Magnum will create a router between this private network - and the external network. This will allow the servers to download - images, access the discovery service, etc., and the containers to install - packages, etc. In the opposite direction, floating IPs will be - allocated from the external network to provide access from the - external internet to servers and the container services hosted in - the cluster. This is a mandatory parameter and there is no default - value. - ---public - Access to a ClusterTemplate is normally limited to the admin, the owner, or users - within the same tenant as the owners. Setting this flag - makes the ClusterTemplate public and accessible by other users. The default - is not public. - ---server-type \<server-type\> - The servers in the cluster can be VM or bare metal. This parameter selects - the type of server to create for the cluster. The default is 'vm'. Possible - values are 'vm', 'bm'. - ---network-driver \<network-driver\> - The name of a network driver for providing the networks for the - containers. Note that this is different and separate from the Neutron - network for the cluster. The operation and networking model are specific - to the particular driver; refer to the `Networking`_ section for more - details. Supported network drivers and the default driver are: - - =========== ================= ======== - COE Network-Driver Default - =========== ================= ======== - Kubernetes Flannel Flannel - Swarm Docker, Flannel Flannel - Mesos Docker Docker - =========== ================= ======== - ---volume-driver \<volume-driver\> - The name of a volume driver for managing the persistent storage for - the containers. The functionality supported is specific to the - driver. Supported volume drivers and the default driver are: - - ============= ============= =========== - COE Volume-Driver Default - ============= ============= =========== - Kubernetes Cinder No Driver - Swarm Rexray No Driver - Mesos Rexray No Driver - ============= ============= =========== - ---dns-nameserver \<dns-nameserver\> - The DNS nameserver for the servers and containers in the cluster to use. - This is configured in the private Neutron network for the cluster. The - default is '8.8.8.8'. - ---flavor \<flavor\> - The nova flavor id for booting the node servers. The default - is 'm1.small'. - ---master-flavor \<master-flavor\> - The nova flavor id for booting the master or manager servers. The - default is 'm1.small'. - ---http-proxy \<http-proxy\> - The address of a proxy to use when direct http access from the - servers to sites on the external internet is blocked. This may - happen in certain countries or enterprises, and the proxy allows the - servers and containers to access these sites. The format is a URL - including a port number. The default is 'None'. - ---https-proxy \<https-proxy\> - The address of a proxy to use when direct https access from the - servers to sites on the external internet is blocked. This may - happen in certain countries or enterprises, and the proxy allows the - servers and containers to access these sites. The format is a URL - including a port number. The default is 'None'.
- ---no-proxy \<no-proxy\> - When a proxy server is used, some sites should not go through the - proxy and should be accessed normally. In this case, you can - specify these sites as a comma-separated list of IPs. The default - is 'None'. - ---docker-volume-size \<docker-volume-size\> - If specified, container images will be stored in a Cinder volume of the - specified size in GB. Each cluster node will have a volume attached of - the above size. If not specified, images will be stored in the compute - instance's local disk. For the 'devicemapper' storage driver, the minimum - value is 3GB. For the 'overlay' storage driver, the minimum value is 1GB. - This value can be overridden at cluster creation. - ---docker-storage-driver \<docker-storage-driver\> - The name of a driver to manage the storage for the images and the - container's writable layer. The supported drivers are 'devicemapper' - and 'overlay'. The default is 'devicemapper'. - ---labels \<labels\> - Arbitrary labels in the form of key=value pairs. The accepted keys - and valid values are defined in the cluster drivers. They are used as a - way to pass additional parameters that are specific to a cluster driver. - Refer to the subsection on labels for a list of the supported - key/value pairs and their usage. - ---tls-disabled - Transport Layer Security (TLS) is normally enabled to secure the - cluster. In some cases, users may want to disable TLS in the cluster, - for instance during development or to troubleshoot certain problems. - Specifying this parameter will disable TLS so that users can access - the COE endpoints without a certificate. The default is TLS - enabled. - ---registry-enabled - Docker images by default are pulled from the public Docker registry, - but in some cases, users may want to use a private registry. This - option provides an alternative registry based on the Registry V2: - Magnum will create a local registry in the cluster backed by Swift to - host the images. Refer to - `Docker Registry 2.0 `_ - for more details. The default is to use the public registry. - ---master-lb-enabled - Since multiple masters may exist in a cluster, a load balancer is - created to provide the API endpoint for the cluster and to direct - requests to the masters. In some cases, such as when the LBaaS - service is not available, this option can be set to 'false' to - create a cluster without the load balancer. In this case, one of the - masters will serve as the API endpoint. The default is 'true', - i.e. to create the load balancer for the cluster. - - -Labels ------- - -Labels are a general method for specifying supplemental parameters that are -specific to a certain COE or associated with certain options. Their -format is a key/value pair and their meaning is interpreted by the -drivers that use them. The drivers do validate the key/value pairs. -Their usage is explained in detail in the appropriate sections; -however, since there are many possible labels, the following table -provides a summary to help give a clearer picture. The label keys in -the table are linked to more details elsewhere in the user guide.
- -+---------------------------------------+--------------------+---------------+ -| label key | label value | default | -+=======================================+====================+===============+ -| `flannel_network_cidr`_ | IPv4 CIDR | 10.100.0.0/16 | -| | | | -+---------------------------------------+--------------------+---------------+ -| `flannel_backend`_ | - udp | udp | -| | - vxlan | | -| | - host-gw | | -+---------------------------------------+--------------------+---------------+ -| `flannel_network_subnetlen`_ | size of subnet to | 24 | -| | assign to node | | -+---------------------------------------+--------------------+---------------+ -| `rexray_preempt`_ | - true | false | -| | - false | | -+---------------------------------------+--------------------+---------------+ -| `mesos_slave_isolation`_ | - filesystem/posix | "" | -| | - filesystem/linux | | -| | - filesystem/shared| | -| | - posix/cpu | | -| | - posix/mem | | -| | - posix/disk | | -| | - cgroups/cpu | | -| | - cgroups/mem | | -| | - docker/runtime | | -| | - namespaces/pid | | -+---------------------------------------+--------------------+---------------+ -| `mesos_slave_image_providers`_ | - appc | "" | -| | - docker | | -| | - appc,docker | | -+---------------------------------------+--------------------+---------------+ -| `mesos_slave_work_dir`_ | (directory name) | "" | -+---------------------------------------+--------------------+---------------+ -| `mesos_slave_executor_env_variables`_ | (file name) | "" | -+---------------------------------------+--------------------+---------------+ -| `swarm_strategy`_ | - spread | spread | -| | - binpack | | -| | - random | | -+---------------------------------------+--------------------+---------------+ -| `admission_control_list`_ | see below | see below | -+---------------------------------------+--------------------+---------------+ -| `prometheus_monitoring`_ | - true | false | -| | - false | | -+---------------------------------------+--------------------+---------------+ -| `grafana_admin_passwd`_ | (any string) | "admin" | -+---------------------------------------+--------------------+---------------+ -| `docker_volume_type`_ | see below | see below | -+---------------------------------------+--------------------+---------------+ -| `etcd_volume_size`_ | etcd storage | 0 | -| | volume size | | -+---------------------------------------+--------------------+---------------+ - -======= -Cluster -======= - -A cluster (previously known as a bay) is an instance of the ClusterTemplate -of a COE. Magnum deploys a cluster by referring to the attributes -defined in the particular ClusterTemplate as well as a few additional -parameters for the cluster. Magnum deploys the orchestration templates -provided by the cluster driver to create and configure all the necessary -infrastructure. When ready, the cluster is a fully operational COE that -can host containers. - -Infrastructure -------------- - -The infrastructure of the cluster consists of the resources provided by -the various OpenStack services. Existing infrastructure, including -infrastructure external to OpenStack, can also be used by the cluster, -such as DNS, public network, public discovery service, Docker registry. -The actual resources created depend on the COE type and the options -specified; therefore you need to refer to the cluster driver documentation -of the COE for specific details.
For instance, the option -'--master-lb-enabled' in the ClusterTemplate will cause a load balancer pool, -along with a health monitor and a floating IP, to be created. It is -important to distinguish resources in the IaaS level from resources in -the PaaS level. For instance, the infrastructure networking in -OpenStack IaaS is different and separate from the container networking -in Kubernetes or Swarm PaaS. - -Typical infrastructure includes the following. - -Servers - The servers host the containers in the cluster and these servers can be - VM or bare metal. VMs are provided by Nova. Since multiple VMs - are hosted on a physical server, the VMs provide the isolation - needed for containers between different tenants running on the same - physical server. Bare metal servers are provided by Ironic and are - used when peak performance with virtually no overhead is needed for - the containers. - -Identity - Keystone provides the authentication and authorization for managing - the cluster infrastructure. - -Network - Networking among the servers is provided by Neutron. Since COEs - are currently not multi-tenant, isolation for multi-tenancy on the - networking level is done by using a private network for each cluster. - As a result, containers belonging to one tenant will not be - accessible to containers or servers of another tenant. Other - networking resources may also be used, such as load balancers and - routers. Networking among containers can be provided by Kuryr if - needed. - -Storage - Cinder provides the block storage that can be used to host the - containers and as persistent storage for the containers. - -Security - Barbican provides the storage of secrets such as certificates used - for Transport Layer Security (TLS) within the cluster. - - -Life cycle ---------- - -The set of life cycle operations on the cluster is one of the key values -that Magnum provides, enabling clusters to be managed painlessly on -OpenStack. The current operations are the basic CRUD operations, but -more advanced operations are under discussion in the community and -will be implemented as needed. - -**NOTE** The OpenStack resources created for a cluster are fully -accessible to the cluster owner. Care should be taken when modifying or -reusing these resources to avoid impacting Magnum operations in -unexpected ways. For instance, if you launch your own Nova -instance on the cluster private network, Magnum would not be aware of this -instance. Therefore, the cluster-delete operation will fail because -Magnum would not delete the extra Nova instance and the private Neutron -network cannot be removed while a Nova instance is still attached. - -**NOTE** Currently Heat nested templates are used to create the -resources; therefore if an error occurs, you can troubleshoot through -Heat. For more help on Heat stack troubleshooting, refer to the -`Troubleshooting Guide -`_. - - - -Create -++++++ - -**NOTE** bay-\<command\> are the deprecated versions of these commands and are -still supported in the current release. They will be removed in a future version. -When using the 'bay' versions of the commands, the term 'bay' replaces -'cluster' in the parameters. For example, 'bay-create' takes --baymodel -as the parameter instead of ---cluster-template.
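As a concrete illustration of the note above, the two commands below would be equivalent; the cluster and template names are made up for this example:

.. code-block:: console

   # Deprecated form, to be removed in a future release
   $ magnum bay-create --name mybay --baymodel mytemplate --node-count 2

   # Current form
   $ magnum cluster-create mybay --cluster-template mytemplate --node-count 2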
- -The 'cluster-create' command deploys a cluster, for example:: - - magnum cluster-create mycluster \ - --cluster-template mytemplate \ - --node-count 8 \ - --master-count 3 - -The 'cluster-create' operation is asynchronous; therefore you can initiate -another 'cluster-create' operation while the current cluster is being created. -If the cluster fails to be created, the infrastructure created so far may -be retained or deleted depending on the particular orchestration -engine. As a common practice, a failed cluster is retained during -development for troubleshooting, but automatically deleted in -production. The current cluster drivers use Heat templates and the -resources of a failed 'cluster-create' are retained. - -The definition and usage of the parameters for 'cluster-create' are as -follows: - -\<name\> - Name of the cluster to create. If a name is not specified, a random - name will be generated using a string and a number, for example - "gamma-7-cluster". - ---cluster-template \<cluster-template\> - The ID or name of the ClusterTemplate to use. This is a mandatory - parameter. Once a ClusterTemplate is used to create a cluster, it cannot - be deleted or modified until all clusters that use the ClusterTemplate have - been deleted. - ---keypair \<keypair\> - The name of the SSH keypair to configure in the cluster servers - for ssh access. You will need the key to be able to ssh to the - servers in the cluster. The login name is specific to the cluster - driver. If a keypair is not provided, Magnum will attempt to use the value in - the ClusterTemplate. If the ClusterTemplate is also missing a keypair value - then an error will be returned. The keypair value provided here will - override the keypair value from the ClusterTemplate. - ---node-count \<node-count\> - The number of servers that will serve as nodes in the cluster. - The default is 1. - ---master-count \<master-count\> - The number of servers that will serve as masters for the cluster. - The default is 1. Set to more than 1 master to enable High - Availability. If the option '--master-lb-enabled' is specified in - the ClusterTemplate, the master servers will be placed in a load balancer - pool. - ---discovery-url \<discovery-url\> - The custom discovery URL for node discovery. This is used by the - COE to discover the servers that have been created to host the - containers. The actual discovery mechanism varies with the COE. In - some cases, Magnum fills in the server info in the discovery - service. In other cases, if the discovery-url is not specified, - Magnum will use the public discovery service at:: - - https://discovery.etcd.io - - In this case, Magnum will generate a unique URL for each cluster - and store the info for the servers. - ---timeout \<timeout\> - The timeout for cluster creation in minutes. The value expected is a - positive integer and the default is 60 minutes. If the timeout is - reached during cluster-create, the operation will be aborted and the - cluster status will be set to 'CREATE_FAILED'. - -List -++++ - -The 'cluster-list' command lists all the clusters that belong to the tenant, -for example:: - - magnum cluster-list - -Show -++++ - -The 'cluster-show' command prints all the details of a cluster, for -example:: - - magnum cluster-show mycluster - -The properties include those not specified by users that have been -assigned default values and properties from new resources that -have been created for the cluster.
- -Update -++++++ - -A cluster can be modified using the 'cluster-update' command, for example:: - - magnum cluster-update mycluster replace node_count=8 - -The parameters are positional and their definition and usage are as -follows. - -\<cluster\> - This is the first parameter, specifying the UUID or name of the cluster - to update. - -\<op\> - This is the second parameter, specifying the desired change to be - made to the cluster attributes. The allowed changes are 'add', - 'replace' and 'remove'. - -\<attribute=value\> - This is the third parameter, specifying the targeted attributes in - the cluster as a list separated by blank space. To add or replace an - attribute, you need to specify the value for the attribute. To - remove an attribute, you only need to specify the name of the - attribute. Currently the only attribute that can be replaced or - removed is 'node_count'. The attributes 'name', 'master_count' and - 'discovery_url' cannot be replaced or deleted. The table below - summarizes the possible changes to a cluster. - - +---------------+-----+------------------+-----------------------+ - | Attribute | add | replace | remove | - +===============+=====+==================+=======================+ - | node_count | no | add/remove nodes | reset to default of 1 | - +---------------+-----+------------------+-----------------------+ - | master_count | no | no | no | - +---------------+-----+------------------+-----------------------+ - | name | no | no | no | - +---------------+-----+------------------+-----------------------+ - | discovery_url | no | no | no | - +---------------+-----+------------------+-----------------------+ - -The 'cluster-update' operation cannot be initiated when another operation -is in progress. - -**NOTE:** The attribute names in cluster-update are slightly different -from the corresponding names in the cluster-create command: the dash '-' -is replaced by an underscore '_'. For instance, 'node-count' in -cluster-create is 'node_count' in cluster-update. - -Scale -+++++ - -Scaling a cluster means adding servers to or removing servers from the cluster. -Currently, this is done through the 'cluster-update' operation by modifying -the node-count attribute, for example:: - - magnum cluster-update mycluster replace node_count=2 - -When some nodes are removed, Magnum will attempt to find nodes with no -containers to remove. If some nodes with containers must be removed, -Magnum will log a warning message. - -Delete -++++++ - -The 'cluster-delete' operation removes the cluster by deleting all resources -such as servers, network, storage; for example:: - - magnum cluster-delete mycluster - -The only parameter for the cluster-delete command is the ID or name of the -cluster to delete. Multiple clusters can be specified, separated by a blank -space. - -If the operation fails, there may be some remaining resources that -have not been deleted yet. In this case, you can troubleshoot through -Heat. If the templates are deleted manually in Heat, you can delete -the cluster in Magnum to clean up the cluster from the Magnum database. - -The 'cluster-delete' operation can be initiated when another operation is -still in progress.
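If a delete does leave resources behind, the Heat stack is the place to look. A possible cleanup sequence, assuming the OpenStack client with Heat support is available (the stack and cluster names are illustrative):

.. code-block:: console

   # Find the stack that was created for the cluster
   $ openstack stack list | grep mycluster

   # Remove the stack in Heat, then remove the cluster record in Magnum
   $ openstack stack delete mycluster-abc123
   $ magnum cluster-delete mycluster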
- - -============= -Python Client -============= - -Installation ------------ - -Follow the instructions in the OpenStack Installation Guide to enable the -repositories for your distribution: - -* `RHEL/CentOS/Fedora - `_ -* `Ubuntu/Debian - `_ -* `openSUSE/SUSE Linux Enterprise - `_ - -Install using distribution packages for RHEL/CentOS/Fedora:: - - $ sudo yum install python-magnumclient - -Install using distribution packages for Ubuntu/Debian:: - - $ sudo apt-get install python-magnumclient - -Install using distribution packages for openSUSE and SUSE Linux Enterprise:: - - $ sudo zypper install python-magnumclient - -Verifying installation ---------------------- - -Execute the `magnum` command with the `--version` argument to confirm that the -client is installed and in the system path:: - - $ magnum --version - 1.1.0 - -Note that the version returned may differ from the above; 1.1.0 was the latest -available version at the time of writing. - -Using the command-line client ----------------------------- - -Refer to the `OpenStack Command-Line Interface Reference -`_ for a full list of the -commands supported by the `magnum` command-line client. - -================= -Horizon Interface -================= - -Magnum provides a Horizon plugin so that users can access the Container -Infrastructure Management service through the OpenStack browser-based -graphical UI. The plugin is available from -`magnum-ui `_. It is not -installed by default in the standard Horizon service, but you can -follow the instructions for `installing a Horizon plugin -`_. - -In Horizon, the container infrastructure panel is part of the -'Project' view and it currently supports the following operations: - -- View list of cluster templates -- View details of a cluster template -- Create a cluster template -- Delete a cluster template -- View list of clusters -- View details of a cluster -- Create a cluster -- Delete a cluster -- Get the Certificate Authority for a cluster -- Sign a user key and obtain a signed certificate for accessing the secured - COE API endpoint in a cluster. - -Other operations are not yet supported and the CLI should be used for these. - -Following is a screenshot of the Horizon view showing the list of cluster -templates. - -.. image:: ../images/cluster-template.png - -Following is a screenshot of the Horizon view showing the details of a -cluster template. - -.. image:: ../images/cluster-template-details.png - -Following is a screenshot of the dialog to create a new cluster. - -.. image:: ../images/cluster-create.png - - -=============== -Cluster Drivers -=============== - -A cluster driver is a collection of Python code, Heat templates, scripts, -images, and documents for a particular COE on a particular -distro. Magnum presents the concepts of ClusterTemplates and clusters. The -implementation for a particular cluster type is provided by the cluster driver. -In other words, the cluster driver provisions and manages the infrastructure -for the COE.
Magnum includes default drivers for the following -COE and distro pairs: - -+------------+---------------+ -| COE | distro | -+============+===============+ -| Kubernetes | Fedora Atomic | -+------------+---------------+ -| Kubernetes | CoreOS | -+------------+---------------+ -| Swarm | Fedora Atomic | -+------------+---------------+ -| Mesos | Ubuntu | -+------------+---------------+ - -Magnum is designed to accommodate new cluster drivers to support custom -COEs and this section describes how a new cluster driver can be -constructed and enabled in Magnum. - - -Directory structure ------------------- - -Magnum expects the components to be organized in the following -directory structure under the directory 'drivers':: - - COE_Distro/ - image/ - templates/ - api.py - driver.py - monitor.py - scale.py - template_def.py - version.py - -The minimum required components are: - -driver.py - Python code that implements the controller operations for - the particular COE. The driver must implement the currently - supported operations: - ``cluster_create``, ``cluster_update``, ``cluster_delete``. - -templates - A directory of orchestration templates for managing the lifecycle - of clusters, including creation, configuration, update, and deletion. - Currently only Heat templates are supported, but in the future - other orchestration mechanisms such as Ansible may be supported. - -template_def.py - Python code that maps the parameters from the ClusterTemplate to the - input parameters for the orchestration and invokes - the orchestration in the templates directory. - -version.py - Tracks the latest version of the driver in this directory. - This is defined by a ``version`` attribute and is represented in the - form of ``1.0.0``. It should also include a ``Driver`` attribute with - a descriptive name such as ``fedora_swarm_atomic``. - - -The remaining components are optional: - -image - Instructions for obtaining or building an image suitable for the COE. - -api.py - Python code to interface with the COE. - -monitor.py - Python code to monitor the resource utilization of the cluster. - -scale.py - Python code to scale the cluster by adding or removing nodes. - - - -Sample cluster driver -------------------- - -To help developers create new COE drivers, a minimal cluster driver -is provided as an example. The 'docker' cluster driver will simply deploy -a single VM running Ubuntu with the latest Docker version installed. -It is not a true cluster, but the simplicity will help to illustrate -the key concepts. - -*To be filled in* - - - -Installing a cluster driver --------------------------- -*To be filled in* - - -======================= -Cluster Type Definition -======================= - -.. include:: cluster-type-definition.rst - -==================== -Heat Stack Templates -==================== - -.. include:: heat-templates.rst - -============== -Choosing a COE -============== -Magnum supports a variety of COE options, and allows more to be added over time -as they gain popularity. As an operator, you may choose to support the full -variety of options, or you may want to offer a subset of the available choices. -Given multiple choices, your users can run one or more clusters, and each may -use a different COE. For example, you might have multiple clusters that use -Kubernetes, and just one cluster that uses Swarm. All of these clusters can -run concurrently, even though they use different COE software.
- -Choosing which COE to use depends on what tools you want to use to manage your -containers once you start your app. If you want to use the Docker tools, you -may want to use the Swarm cluster type. Swarm will spread your containers -across the various nodes in your cluster automatically. It does not monitor -the health of your containers, so it can't restart them for you if they stop. -It will not automatically scale your app for you (as of Swarm version 1.2.2). -You may view this as a plus. If you prefer to manage your application yourself, -you might prefer Swarm over the other COE options. - -Kubernetes (as of v1.2) is more sophisticated than Swarm (as of v1.2.2). It -offers an attractive YAML file description of a pod, which is a grouping of -containers that run together as part of a distributed application. This file -format allows you to model your application deployment using a declarative -style. It has support for auto scaling and fault recovery, as well as features -that allow for sophisticated software deployments, including canary deploys -and blue/green deploys. Kubernetes is very popular, especially for web -applications. - -Apache Mesos is a COE that has been around longer than Kubernetes or Swarm. It -allows for a variety of different frameworks to be used along with it, -including Marathon, Aurora, Chronos, Hadoop, and `a number of others. -`_ - -The Apache Mesos framework design can be used to run alternate COE software -directly on Mesos. Although this approach is not widely used yet, it may soon -be possible to run Mesos with Kubernetes and Swarm as frameworks, allowing -you to share the resources of a cluster between multiple different COEs. Until -this option matures, we encourage Magnum users to create multiple clusters, and -use the COE in each cluster that best fits the anticipated workload. - -Finding the right COE for your workload is up to you, but Magnum offers you a -choice to select among the prevailing leading options. Once you decide, see -the next sections for examples of how to create a cluster with your desired -COE. - -============== -Native Clients -============== - -Magnum preserves the native user experience with a COE and does not -provide a separate API or client. This means you will need to use the -native client for the particular cluster type to interface with the -clusters. In the typical case, there are two clients to consider: - -COE level - This is the orchestration or management level such as Kubernetes, - Swarm, Mesos and its frameworks. - -Container level - This is the low-level container operation. Currently it is - Docker for all clusters. - -The clients can be CLI and/or browser-based. You will need to refer -to the documentation for the specific native client and appropriate -version for details, but following are some pointers for reference. - -The Kubernetes CLI is the tool 'kubectl', which can be simply copied from -a node in the cluster or downloaded from the Kubernetes release. For -instance, if the cluster is running Kubernetes release 1.2.0, the -binary for 'kubectl' can be downloaded and set up locally as -follows:: - - curl -O https://storage.googleapis.com/kubernetes-release/release/v1.2.0/bin/linux/amd64/kubectl - chmod +x kubectl - sudo mv kubectl /usr/local/bin/kubectl - -Kubernetes also provides a browser UI.
If the cluster has the -Kubernetes Dashboard running, it can be accessed using:: - - eval $(magnum cluster-config <cluster-name>) - kubectl proxy - -The dashboard can then be accessed in a browser at http://localhost:8001/ui - -For Swarm, the main CLI is 'docker', along with associated tools -such as 'docker-compose', etc. Specific versions of the binaries can -be obtained from the `Docker Engine installation -`_. - -A Mesos cluster uses the Marathon framework and details on the Marathon -UI can be found in the section `Using Marathon`_. - -Depending on the client requirement, you may need to use a version of -the client that matches the version in the cluster. To determine the -version of the COE and container, use the command 'cluster-show' and -look for the attributes *coe_version* and *container_version*:: - - magnum cluster-show k8s-cluster - +--------------------+------------------------------------------------------------+ - | Property | Value | - +--------------------+------------------------------------------------------------+ - | status | CREATE_COMPLETE | - | uuid | 04952c60-a338-437f-a7e7-d016d1d00e65 | - | stack_id | b7bf72ce-b08e-4768-8201-e63a99346898 | - | status_reason | Stack CREATE completed successfully | - | created_at | 2016-07-25T23:14:06+00:00 | - | updated_at | 2016-07-25T23:14:10+00:00 | - | create_timeout | 60 | - | coe_version | v1.2.0 | - | api_address | https://192.168.19.86:6443 | - | cluster_template_id | da2825a0-6d09-4208-b39e-b2db666f1118 | - | master_addresses | ['192.168.19.87'] | - | node_count | 1 | - | node_addresses | ['192.168.19.88'] | - | master_count | 1 | - | container_version | 1.9.1 | - | discovery_url | https://discovery.etcd.io/3b7fb09733429d16679484673ba3bfd5 | - | name | k8s-cluster | - +--------------------+------------------------------------------------------------+ - - -========== -Kubernetes -========== -Kubernetes uses a range of terminology that we refer to in this guide. We -define these common terms for your reference: - -Pod - When using the Kubernetes container orchestration engine, a pod is the - smallest deployable unit that can be created and managed. A pod is a - co-located group of application containers that run with a shared context. - When using Magnum, pods are created and managed within clusters. Refer to the - `pods section - `_ in the `Kubernetes - User Guide`_ for more information. - -Replication controller - A replication controller is used to ensure that at any given time a certain - number of replicas of a pod are running. Pods are automatically created and - deleted by the replication controller as necessary based on a template to - ensure that the defined number of replicas exist. Refer to the `replication - controller section - `_ in - the `Kubernetes User Guide`_ for more information. - -Service - A service is an additional layer of abstraction provided by the Kubernetes - container orchestration engine which defines a logical set of pods and a - policy for accessing them. This is useful because, while pods are created and - deleted by a replication controller, other pods needing to - discover them can do so via the service abstraction. Refer to the - `services section - `_ in the - `Kubernetes User Guide`_ for more information. - -..
_Kubernetes User Guide: http://kubernetes.io/v1.0/docs/user-guide/ - -When Magnum deploys a Kubernetes cluster, it uses parameters defined in the -ClusterTemplate and specified on the cluster-create command, for example:: - - magnum cluster-template-create k8s-cluster-template \ - --image fedora-atomic-latest \ - --keypair testkey \ - --external-network public \ - --dns-nameserver 8.8.8.8 \ - --flavor m1.small \ - --docker-volume-size 5 \ - --network-driver flannel \ - --coe kubernetes - - magnum cluster-create k8s-cluster \ - --cluster-template k8s-cluster-template \ - --master-count 3 \ - --node-count 8 - -Refer to the `ClusterTemplate`_ and `Cluster`_ sections for the full list of -parameters. Following are further details relevant to a Kubernetes cluster: - -Number of masters (master-count) - Specified in the cluster-create command to indicate how many servers will - run as masters in the cluster. Having more than one will provide high - availability. The masters will be in a load balancer pool and the - virtual IP address (VIP) of the load balancer will serve as the - Kubernetes API endpoint. For external access, a floating IP - associated with this VIP is available and this is the endpoint - shown for Kubernetes in the 'cluster-show' command. - -Number of nodes (node-count) - Specified in the cluster-create command to indicate how many servers will - run as nodes in the cluster to host the users' pods. The nodes are registered - in Kubernetes using the Nova instance name. - -Network driver (network-driver) - Specified in the ClusterTemplate to select the network driver. - The supported and default network driver is 'flannel', an overlay - network providing a flat network for all pods. Refer to the - `Networking`_ section for more details. - -Volume driver (volume-driver) - Specified in the ClusterTemplate to select the volume driver. The supported - volume driver is 'cinder', allowing Cinder volumes to be mounted in - containers for use as persistent storage. Data written to these volumes - will persist after the container exits and can be accessed again from other - containers, while data written to the union file system hosting the container - will be deleted. Refer to the `Storage`_ section for more details. - -Storage driver (docker-storage-driver) - Specified in the ClusterTemplate to select the Docker storage driver. The - supported storage drivers are 'devicemapper' and 'overlay', with - 'devicemapper' being the default. Refer to the `Storage`_ section for more - details. - -Image (image) - Specified in the ClusterTemplate to indicate the image to boot the servers. - The image binary is loaded in Glance with the attribute - 'os_distro = fedora-atomic'. - Currently supported images are Fedora Atomic (download from `Fedora - `_ ) - and CoreOS (download from `CoreOS - `_ ) - -TLS (tls-disabled) - Transport Layer Security is enabled by default, so you need a key and - signed certificate to access the Kubernetes API and CLI. Magnum - handles its own key and certificate when interfacing with the - Kubernetes cluster. In development mode, TLS can be disabled. Refer to - the `Transport Layer Security`_ section for more details. - -What runs on the servers - The servers for the Kubernetes master host containers in the 'kube-system' - namespace to run the Kubernetes proxy, scheduler and controller manager. - The masters will not host users' pods. Kubernetes API server, docker - daemon, etcd and flannel run as systemd services.
The servers for - Kubernetes nodes also host a container in the 'kube-system' namespace - to run the Kubernetes proxy, while Kubernetes kubelet, docker daemon - and flannel run as systemd services. - -Log into the servers - You can log into the master servers using the login 'fedora' and the - keypair specified in the ClusterTemplate. - -In addition to the common attributes in the ClusterTemplate, you can specify -the following attributes that are specific to Kubernetes by using the -labels attribute. - -_`admission_control_list` - This label corresponds to the Kubernetes parameter for the API server '--admission-control'. - For more details, refer to the `Admission Controllers - `_. - The default value corresponds to the one recommended in this doc - for our current Kubernetes version. - -_`etcd_volume_size` - This label sets the size of a volume holding the etcd storage data. - The default value is 0, meaning the etcd data is not persisted (no volume). - -External load balancer for services ----------------------------------- - -All Kubernetes pods and services created in the cluster are assigned IP -addresses on a private container network so they can access each other -and the external internet. However, these IP addresses are not -accessible from an external network. - -To publish a service endpoint externally so that the service can be -accessed from the external network, Kubernetes provides the external -load balancer feature. This is done by simply specifying in the -service manifest the attribute "type: LoadBalancer". Magnum enables -and configures the Kubernetes plugin for OpenStack so that it can -interface with Neutron and manage the necessary networking resources. - -When the service is created, Kubernetes will add an external load -balancer in front of the service so that the service will have an -external IP address in addition to the internal IP address on the -container network. The service endpoint can then be accessed with -this external IP address. Kubernetes handles all the life cycle -operations when pods are modified behind the service and when the -service is deleted. - -Refer to the document `Kubernetes external load balancer -`_ -for more details. - - -===== -Swarm -===== - -A Swarm cluster is a pool of servers running the Docker daemon that is -managed as a single Docker host. One or more Swarm managers accept -the standard Docker API and manage this pool of servers. -Magnum deploys a Swarm cluster using parameters defined in -the ClusterTemplate and specified on the 'cluster-create' command, for -example:: - - magnum cluster-template-create swarm-cluster-template \ - --image fedora-atomic-latest \ - --keypair testkey \ - --external-network public \ - --dns-nameserver 8.8.8.8 \ - --flavor m1.small \ - --docker-volume-size 5 \ - --coe swarm - - magnum cluster-create swarm-cluster \ - --cluster-template swarm-cluster-template \ - --master-count 3 \ - --node-count 8 - -Refer to the `ClusterTemplate`_ and `Cluster`_ sections for the full list of -parameters. Following are further details relevant to Swarm: - -What runs on the servers - There are two types of servers in the Swarm cluster: managers and nodes. - The Docker daemon runs on all servers. On the manager servers, - the Swarm manager is run as a Docker container on port 2376 and this - is initiated by the systemd service swarm-manager. Etcd is also run - on the manager servers for discovery of the node servers in the cluster.
- On the node servers, the Swarm agent is run as a Docker - container on port 2375 and this is initiated by the systemd service - swarm-agent. On startup, the agents will register themselves in - etcd and the managers will discover the new node to manage. - -Number of managers (master-count) - Specified in the cluster-create command to indicate how many servers will - run as managers in the cluster. Having more than one will provide high - availability. The managers will be in a load balancer pool and the - load balancer virtual IP address (VIP) will serve as the Swarm API - endpoint. A floating IP associated with the load balancer VIP will - serve as the external Swarm API endpoint. The managers accept - the standard Docker API and perform the corresponding operation on the - servers in the pool. For instance, when a new container is created, - the managers will select one of the servers based on some strategy - and schedule the containers there. - -Number of nodes (node-count) - Specified in the cluster-create command to indicate how many servers will - run as nodes in the cluster to host your Docker containers. These servers - will register themselves in etcd for discovery by the managers, and - interact with the managers. The Docker daemon is run locally to host - containers from users. - -Network driver (network-driver) - Specified in the ClusterTemplate to select the network driver. The supported - drivers are 'docker' and 'flannel', with 'docker' as the default. - With the 'docker' driver, containers are connected to the 'docker0' - bridge on each node and are assigned a local IP address. With the - 'flannel' driver, containers are connected to a flat overlay network - and are assigned IP addresses by Flannel. Refer to the `Networking`_ - section for more details. - -Volume driver (volume-driver) - Specified in the ClusterTemplate to select the volume driver to provide - persistent storage for containers. The supported volume driver is - 'rexray'. The default is no volume driver. When 'rexray' or another - volume driver is deployed, you can use the Docker 'volume' command to - create, mount, unmount, and delete volumes in containers. Cinder block - storage is used as the backend to support this feature. - Refer to the `Storage`_ section for more details. - -Storage driver (docker-storage-driver) - Specified in the ClusterTemplate to select the Docker storage driver. The - supported storage drivers are 'devicemapper' and 'overlay', with - 'devicemapper' being the default. Refer to the `Storage`_ section for more - details. - -Image (image) - Specified in the ClusterTemplate to indicate the image to boot the servers - for the Swarm manager and node. - The image binary is loaded in Glance with the attribute - 'os_distro = fedora-atomic'. - The currently supported image is Fedora Atomic (download from `Fedora - `_ ) - -TLS (tls-disabled) - Transport Layer Security is enabled by default to secure the Swarm API for - access by both the users and Magnum. You will need a key and a - signed certificate to access the Swarm API and CLI. Magnum - handles its own key and certificate when interfacing with the - Swarm cluster. In development mode, TLS can be disabled. Refer to - the `Transport Layer Security`_ section for details on how to create your - key and have Magnum sign your certificate. - -Log into the servers - You can log into the manager and node servers with the account 'fedora' and - the keypair specified in the ClusterTemplate.
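For example, to log into a manager server, you could look up its address with 'cluster-show' and then ssh in with the 'fedora' account; the address below is illustrative:

.. code-block:: console

   $ magnum cluster-show swarm-cluster | awk '/ master_addresses /{print $4}'
   ['192.168.19.87']
   $ ssh fedora@192.168.19.87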
- -In addition to the common attributes in the ClusterTemplate, you can specify -the following attributes that are specific to Swarm by using the -labels attribute. - -_`swarm_strategy` - This label corresponds to the Swarm parameter for master '--strategy'. - For more details, refer to the `Swarm Strategy - `_. - Valid values for this label are: - - - spread - - binpack - - random - -===== -Mesos -===== - -A Mesos cluster consists of a pool of servers running as Mesos slaves, -managed by a set of servers running as Mesos masters. Mesos manages -the resources from the slaves but does not itself deploy containers. -Instead, one or more Mesos frameworks running on the Mesos cluster would -accept user requests on their own endpoint, using their particular -API. These frameworks would then negotiate the resources with Mesos -and the containers are deployed on the servers where the resources are -offered. - -Magnum deploys a Mesos cluster using parameters defined in the ClusterTemplate -and specified on the 'cluster-create' command, for example:: - - magnum cluster-template-create mesos-cluster-template \ - --image ubuntu-mesos \ - --keypair testkey \ - --external-network public \ - --dns-nameserver 8.8.8.8 \ - --flavor m1.small \ - --coe mesos - - magnum cluster-create mesos-cluster \ - --cluster-template mesos-cluster-template \ - --master-count 3 \ - --node-count 8 - -Refer to the `ClusterTemplate`_ and `Cluster`_ sections for the full list of -parameters. Following are further details relevant to Mesos: - -What runs on the servers - There are two types of servers in the Mesos cluster: masters and slaves. - The Docker daemon runs on all servers. On the servers for master, - the Mesos master is run as a process on port 5050 and this is - initiated by the upstart service 'mesos-master'. Zookeeper is also - run on the master servers, initiated by the upstart service - 'zookeeper'. Zookeeper is used by the master servers for electing - the leader among the masters, and by the slave servers and - frameworks to determine the current leader. The framework Marathon - is run as a process on port 8080 on the master servers, initiated by - the upstart service 'marathon'. On the servers for slave, the Mesos - slave is run as a process initiated by the upstart service - 'mesos-slave'. - -Number of masters (master-count) - Specified in the cluster-create command to indicate how many servers - will run as masters in the cluster. Having more than one will provide - high availability. If the load balancer option is specified, the - masters will be in a load balancer pool and the load balancer - virtual IP address (VIP) will serve as the Mesos API endpoint. A - floating IP associated with the load balancer VIP will serve as the - external Mesos API endpoint. - -Number of agents (node-count) - Specified in the cluster-create command to indicate how many servers - will run as Mesos slaves in the cluster. The Docker daemon is run locally to - host containers from users. The slaves report their available - resources to the master and accept requests from the master to deploy - tasks from the frameworks. In this case, the tasks will be to - run Docker containers. - -Network driver (network-driver) - Specified in the ClusterTemplate to select the network driver. Currently - 'docker' is the only supported driver: containers are connected to - the 'docker0' bridge on each node and are assigned a local IP address. - Refer to the `Networking`_ section for more details.
- -Volume driver (volume-driver) - Specified in the ClusterTemplate to select the volume driver to provide - persistent storage for containers. The supported volume driver is - 'rexray'. The default is no volume driver. When 'rexray' or another - volume driver is deployed, you can use the Docker 'volume' command to - create, mount, unmount, and delete volumes in containers. Cinder block - storage is used as the backend to support this feature. - Refer to the `Storage`_ section for more details. - -Storage driver (docker-storage-driver) - This is currently not supported for Mesos. - -Image (image) - - Specified in the ClusterTemplate to indicate the image to boot the servers - for the Mesos master and slave. The image binary is loaded in - Glance with the attribute 'os_distro = ubuntu'. You can download - the `ready-built image - `_, - or you can create the image as described below in the `Building - Mesos image`_ section. - -TLS (tls-disabled) - Transport Layer Security is not yet implemented for Mesos. - -Log into the servers - You can log into the master and slave servers with the account - 'ubuntu' and the keypair specified in the ClusterTemplate. - -In addition to the common attributes in the ClusterTemplate, you can specify -the following attributes that are specific to Mesos by using the -labels attribute. - -_`rexray_preempt` - When the volume driver 'rexray' is used, you can mount a data volume - backed by Cinder to a host to be accessed by a container. In this - case, the label 'rexray_preempt' can optionally be set to True or - False to enable any host to take control of the volume regardless of - whether other hosts are using the volume. This will in effect - unmount the volume from the current host and remount it on the new - host. If this label is set to false, then rexray will ensure data - safety by locking the volume before remounting. The default value - is False. - -_`mesos_slave_isolation` - This label corresponds to the Mesos parameter for slave - '--isolation'. The isolators are needed to provide proper isolation - according to the runtime configurations specified in the container - image. For more details, refer to the `Mesos configuration - `_ - and the `Mesos container image support - `_. - Valid values for this label are: - - - filesystem/posix - - filesystem/linux - - filesystem/shared - - posix/cpu - - posix/mem - - posix/disk - - cgroups/cpu - - cgroups/mem - - docker/runtime - - namespaces/pid - -_`mesos_slave_image_providers` - This label corresponds to the Mesos parameter for agent - '--image_providers', which tells the Mesos containerizer what - types of container images are allowed. - For more details, refer to the `Mesos configuration - `_ and - the `Mesos container image support - `_. - Valid values are: - - - appc - - docker - - appc,docker - -_`mesos_slave_work_dir` - This label corresponds to the Mesos parameter '--work_dir' for slave. - For more details, refer to the `Mesos configuration - `_. - Valid value is a directory path to use as the work directory for - the framework, for example:: - - mesos_slave_work_dir=/tmp/mesos - -_`mesos_slave_executor_env_variables` - This label corresponds to the Mesos parameter for slave - '--executor_environment_variables', which passes additional - environment variables to the executor and subsequent tasks. - For more details, refer to the `Mesos configuration - `_.
  Valid value is the name of a JSON file, for example::

      mesos_slave_executor_env_variables=/home/ubuntu/test.json

  The JSON file should contain environment variables, for example::

      {
          "PATH": "/bin:/usr/bin",
          "LD_LIBRARY_PATH": "/usr/local/lib"
      }

  By default the executor will inherit the slave's environment
  variables.


Building Mesos image
--------------------

The boot image for a Mesos cluster is an Ubuntu 14.04 base image with the
following middleware pre-installed:

- ``docker``
- ``zookeeper``
- ``mesos``
- ``marathon``

The cluster driver provides two ways to create this image, as follows.

Diskimage-builder
+++++++++++++++++

To run the `diskimage-builder
`__ tool
manually, use the provided `elements
`__.
Following are the typical steps to use the diskimage-builder tool on
an Ubuntu server::

    $ sudo apt-get update
    $ sudo apt-get install git qemu-utils python-pip
    $ sudo pip install diskimage-builder

    $ git clone https://git.openstack.org/openstack/magnum
    $ git clone https://git.openstack.org/openstack/dib-utils.git
    $ git clone https://git.openstack.org/openstack/tripleo-image-elements.git
    $ git clone https://git.openstack.org/openstack/heat-templates.git
    $ export PATH="${PWD}/dib-utils/bin:$PATH"
    $ export ELEMENTS_PATH=tripleo-image-elements/elements:heat-templates/hot/software-config/elements:magnum/magnum/drivers/mesos_ubuntu_v1/image/mesos
    $ export DIB_RELEASE=trusty

    $ disk-image-create ubuntu vm docker mesos \
        os-collect-config os-refresh-config os-apply-config \
        heat-config heat-config-script \
        -o ubuntu-mesos.qcow2

Dockerfile
++++++++++

To build the image as above but within a Docker container, use the
provided `Dockerfile
`__.
The output image will be saved as '/tmp/ubuntu-mesos.qcow2'.
Following are the typical steps to run a Docker container to build the image::

    $ git clone https://git.openstack.org/openstack/magnum
    $ cd magnum/magnum/drivers/mesos_ubuntu_v1/image
    $ sudo docker build -t magnum/mesos-builder .
    $ sudo docker run -v /tmp:/output --rm -ti --privileged magnum/mesos-builder
    ...
    Image file /output/ubuntu-mesos.qcow2 created...


Using Marathon
--------------

Marathon is a Mesos framework for long running applications. Docker
containers can be deployed via Marathon's REST API. To get the
endpoint for Marathon, run the cluster-show command and look for the
property 'api_address'. Marathon's endpoint is port 8080 on this IP
address, so the web console can be accessed at::

    http://<api_address>:8080/

Refer to the Marathon documentation for details on running applications.
For example, you can 'post' a JSON app description to
``http://<api_address>:8080/apps`` to deploy a Docker container::

    $ cat > app.json << END
    {
      "container": {
        "type": "DOCKER",
        "docker": {
          "image": "libmesos/ubuntu"
        }
      },
      "id": "ubuntu",
      "instances": 1,
      "cpus": 0.5,
      "mem": 512,
      "uris": [],
      "cmd": "while sleep 10; do date -u +%T; done"
    }
    END
    $ API_ADDRESS=$(magnum cluster-show mesos-cluster | awk '/ api_address /{print $4}')
    $ curl -X POST -H "Content-Type: application/json" \
        http://${API_ADDRESS}:8080/v2/apps -d@app.json
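
Once the application is running, the same REST API can be used to scale or
remove it. A minimal sketch, reusing the API_ADDRESS variable and the app
'id' from app.json above (the instance count of 2 is an arbitrary example)::

    curl -X PUT -H "Content-Type: application/json" \
        http://${API_ADDRESS}:8080/v2/apps/ubuntu -d '{"instances": 2}'

    curl -X DELETE http://${API_ADDRESS}:8080/v2/apps/ubuntu
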

========================
Transport Layer Security
========================

Magnum uses TLS to secure communication between a cluster's services and
the outside world. TLS is a complex subject, and many guides on it
exist already. This guide will not attempt to fully describe TLS, but
instead will only cover the necessary steps to get a client set up to
talk to a cluster with TLS. A more in-depth guide on TLS can be found in
the `OpenSSL Cookbook
`_ by Ivan Ristić.

TLS is employed at 3 points in a cluster:

1. By Magnum to communicate with the cluster API endpoint.

2. By the cluster worker nodes to communicate with the master nodes.

3. By the end-user when they use the native client libraries to
   interact with the cluster. This applies to both a CLI and a program
   that uses a client for the particular cluster. Each client needs a
   valid certificate to authenticate and communicate with a cluster.

The first two cases are implemented internally by Magnum and are not
exposed to the users, while the last case involves the users and is
described in more detail below.


Deploying a secure cluster
--------------------------

Current TLS support is summarized below:

+------------+-------------+
| COE        | TLS support |
+============+=============+
| Kubernetes | yes         |
+------------+-------------+
| Swarm      | yes         |
+------------+-------------+
| Mesos      | no          |
+------------+-------------+

For cluster types with TLS support, e.g. Kubernetes and Swarm, TLS is
enabled by default. To disable TLS in Magnum, you can specify the
parameter '--tls-disabled' in the ClusterTemplate. Please note that
disabling TLS is not recommended for security reasons.

In the following example, Kubernetes is used to illustrate a secure
cluster, but the steps are similar for other cluster types that have TLS
support.

First, create a ClusterTemplate; by default TLS is enabled in
Magnum, therefore it does not need to be specified via a parameter::

    magnum cluster-template-create secure-kubernetes \
                                   --keypair default \
                                   --external-network public \
                                   --image fedora-atomic-latest \
                                   --dns-nameserver 8.8.8.8 \
                                   --flavor m1.small \
                                   --docker-volume-size 3 \
                                   --coe kubernetes \
                                   --network-driver flannel

    +-----------------------+--------------------------------------+
    | Property              | Value                                |
    +-----------------------+--------------------------------------+
    | insecure_registry     | None                                 |
    | http_proxy            | None                                 |
    | updated_at            | None                                 |
    | master_flavor_id      | None                                 |
    | uuid                  | 5519b24a-621c-413c-832f-c30424528b31 |
    | no_proxy              | None                                 |
    | https_proxy           | None                                 |
    | tls_disabled          | False                                |
    | keypair_id            | time4funkey                          |
    | public                | False                                |
    | labels                | {}                                   |
    | docker_volume_size    | 5                                    |
    | server_type           | vm                                   |
    | external_network_id   | public                               |
    | cluster_distro        | fedora-atomic                        |
    | image_id              | fedora-atomic-latest                 |
    | volume_driver         | None                                 |
    | registry_enabled      | False                                |
    | docker_storage_driver | devicemapper                         |
    | apiserver_port        | None                                 |
    | name                  | secure-kubernetes                    |
    | created_at            | 2016-07-25T23:09:50+00:00            |
    | network_driver        | flannel                              |
    | fixed_network         | None                                 |
    | coe                   | kubernetes                           |
    | flavor_id             | m1.small                             |
    | dns_nameserver        | 8.8.8.8                              |
    +-----------------------+--------------------------------------+

Now create a cluster. Use the ClusterTemplate name as a template for
cluster creation::

    magnum cluster-create secure-k8s-cluster \
                          --cluster-template secure-kubernetes \
                          --node-count 1

    +---------------------+------------------------------------------------------------+
    | Property            | Value                                                      |
    +---------------------+------------------------------------------------------------+
    | status              | CREATE_IN_PROGRESS                                         |
    | uuid                | 3968ffd5-678d-4555-9737-35f191340fda                       |
    | stack_id            | c96b66dd-2109-4ae2-b510-b3428f1e8761                       |
    | status_reason       | None                                                       |
    | created_at          | 2016-07-25T23:14:06+00:00                                  |
    | updated_at          | None                                                       |
    | create_timeout      | 0                                                          |
    | api_address         | None                                                       |
    | coe_version         | -                                                          |
    | cluster_template_id | 5519b24a-621c-413c-832f-c30424528b31                       |
    | master_addresses    | None                                                       |
    | node_count          | 1                                                          |
    | node_addresses      | None                                                       |
    | master_count        | 1                                                          |
    | container_version   | -                                                          |
    | discovery_url       | https://discovery.etcd.io/ba52a8178e7364d43a323ee4387cf28e |
    | name                | secure-k8s-cluster                                         |
    +---------------------+------------------------------------------------------------+

Now run the cluster-show command to get the details of the cluster and
verify that the api_address is 'https'::

    magnum cluster-show secure-k8s-cluster
    +---------------------+------------------------------------------------------------+
    | Property            | Value                                                      |
    +---------------------+------------------------------------------------------------+
    | status              | CREATE_COMPLETE                                            |
    | uuid                | 04952c60-a338-437f-a7e7-d016d1d00e65                       |
    | stack_id            | b7bf72ce-b08e-4768-8201-e63a99346898                       |
    | status_reason       | Stack CREATE completed successfully                        |
    | created_at          | 2016-07-25T23:14:06+00:00                                  |
    | updated_at          | 2016-07-25T23:14:10+00:00                                  |
    | create_timeout      | 60                                                         |
    | coe_version         | v1.2.0                                                     |
    | api_address         | https://192.168.19.86:6443                                 |
    | cluster_template_id | da2825a0-6d09-4208-b39e-b2db666f1118                       |
    | master_addresses    | ['192.168.19.87']                                          |
    | node_count          | 1                                                          |
    | node_addresses      | ['192.168.19.88']                                          |
    | master_count        | 1                                                          |
    | container_version   | 1.9.1                                                      |
    | discovery_url       | https://discovery.etcd.io/3b7fb09733429d16679484673ba3bfd5 |
    | name                | secure-k8s-cluster                                         |
    +---------------------+------------------------------------------------------------+

You can see the api_address contains 'https' in the URL, showing that
the Kubernetes services are configured securely with SSL certificates
and now any communication to kube-apiserver will be over https.


Interfacing with a secure cluster
---------------------------------

To communicate with the API endpoint of a secure cluster, you will need
to supply 3 SSL artifacts:

1. Your client key
2. A certificate for your client key that has been signed by a
   Certificate Authority (CA)
3. The certificate of the CA

There are two ways to obtain these 3 artifacts.

Automated
+++++++++

Magnum provides the command 'cluster-config' to help the user in setting
up the environment and artifacts for TLS, for example::

    magnum cluster-config swarm-cluster --dir myclusterconfig

This will display the necessary environment variables, which you
can add to your environment::

    export DOCKER_HOST=tcp://172.24.4.5:2376
    export DOCKER_CERT_PATH=myclusterconfig
    export DOCKER_TLS_VERIFY=True

And the artifacts are placed in the directory specified::

    ca.pem
    cert.pem
    key.pem

You can now use the native client to interact with the COE.
The variables and artifacts are unique to the cluster.
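
For example, for the Swarm cluster above you can load the variables and
then run the native Docker client directly. A minimal sketch, assuming the
'swarm-cluster' and directory from the previous command::

    eval $(magnum cluster-config swarm-cluster --dir myclusterconfig)
    docker info
    docker ps
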
The parameters for 'cluster-config' are as follows:

--dir <dirname>
  Directory to save the certificate and config files.

--force
  Overwrite existing files in the directory specified.


Manual
++++++

You can create the key and certificates manually using the following steps.

Client Key
  Your personal private key is essentially a cryptographically generated
  string of bytes. It should be protected in the same manner as a
  password. To generate an RSA key, you can use the 'genrsa' command of
  the 'openssl' tool::

      openssl genrsa -out key.pem 4096

  This command generates a 4096-bit RSA key at key.pem.

Signed Certificate
  To authenticate your key, you need to have it signed by a CA. First
  generate the Certificate Signing Request (CSR). The CSR will be
  used by Magnum to generate a signed certificate that you will use to
  communicate with the cluster. To generate a CSR, openssl requires a
  config file that specifies a few values. Using the example template
  below, you can fill in the 'CN' value with your name and save it as
  client.conf::

      $ cat > client.conf << END
      [req]
      distinguished_name = req_distinguished_name
      req_extensions     = req_ext
      prompt = no
      [req_distinguished_name]
      CN = Your Name
      [req_ext]
      extendedKeyUsage = clientAuth
      END

  Once you have client.conf, you can run the openssl 'req' command to
  generate the CSR::

      openssl req -new -days 365 \
          -config client.conf \
          -key key.pem \
          -out client.csr

  Now that you have your client CSR, you can use the Magnum CLI to
  send it off to Magnum to get it signed::

      magnum ca-sign --cluster secure-k8s-cluster --csr client.csr > cert.pem

Certificate Authority
  The final artifact you need to retrieve is the CA certificate for
  the cluster. This is used by your native client to ensure you are only
  communicating with hosts that Magnum set up::

      magnum ca-show --cluster secure-k8s-cluster > ca.pem

Rotate Certificate
  To rotate the CA certificate for a cluster and invalidate all user
  certificates, you can use the following command::

      magnum ca-rotate --cluster secure-k8s-cluster
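
As an optional sanity check before using the artifacts, you can verify that
the signed certificate chains back to the cluster CA. A minimal sketch using
the standard openssl CLI and the files created above::

    openssl verify -CAfile ca.pem cert.pem
    cert.pem: OK
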
User Examples
-------------

Here are some examples for using the CLI on a secure Kubernetes and
Swarm cluster. You can perform all the TLS set up automatically by::

    eval $(magnum cluster-config <cluster-name>)

Or you can perform the manual steps as described above and specify
the TLS options on the CLI. The SSL artifacts are assumed to be
saved in local files as follows:

- key.pem: your SSL key
- cert.pem: signed certificate
- ca.pem: certificate for cluster CA

For Kubernetes, you need to get 'kubectl', a Kubernetes CLI tool, to
communicate with the cluster::

    curl -O https://storage.googleapis.com/kubernetes-release/release/v1.2.0/bin/linux/amd64/kubectl
    chmod +x kubectl
    sudo mv kubectl /usr/local/bin/kubectl

Now let's run some 'kubectl' commands to check the secure communication.
If you used 'cluster-config', then you can simply run the 'kubectl' command
without having to specify the TLS options since they have been defined
in the environment::

    kubectl version
    Client Version: version.Info{Major:"1", Minor:"0", GitVersion:"v1.2.0", GitCommit:"cffae0523cfa80ddf917aba69f08508b91f603d5", GitTreeState:"clean"}
    Server Version: version.Info{Major:"1", Minor:"0", GitVersion:"v1.2.0", GitCommit:"cffae0523cfa80ddf917aba69f08508b91f603d5", GitTreeState:"clean"}

You can specify the TLS options manually as follows::

    KUBERNETES_URL=$(magnum cluster-show secure-k8s-cluster |
                     awk '/ api_address /{print $4}')
    kubectl version --certificate-authority=ca.pem \
                    --client-key=key.pem \
                    --client-certificate=cert.pem -s $KUBERNETES_URL

    kubectl create -f redis-master.yaml --certificate-authority=ca.pem \
                                        --client-key=key.pem \
                                        --client-certificate=cert.pem -s $KUBERNETES_URL

    pods/test2

    kubectl get pods --certificate-authority=ca.pem \
                     --client-key=key.pem \
                     --client-certificate=cert.pem -s $KUBERNETES_URL
    NAME           READY     STATUS    RESTARTS   AGE
    redis-master   2/2       Running   0          1m

Besides using the environment variables, you can also configure 'kubectl'
to remember the TLS options::

    kubectl config set-cluster secure-k8s-cluster --server=${KUBERNETES_URL} \
        --certificate-authority=${PWD}/ca.pem
    kubectl config set-credentials client --certificate-authority=${PWD}/ca.pem \
        --client-key=${PWD}/key.pem --client-certificate=${PWD}/cert.pem
    kubectl config set-context secure-k8s-cluster --cluster=secure-k8s-cluster --user=client
    kubectl config use-context secure-k8s-cluster

Then you can use 'kubectl' commands without the certificates::

    kubectl get pods
    NAME           READY     STATUS    RESTARTS   AGE
    redis-master   2/2       Running   0          1m

To access the Kubernetes User Interface::

    curl -L ${KUBERNETES_URL}/ui --cacert ca.pem --key key.pem \
        --cert cert.pem

You may also set up the 'kubectl' proxy which will use your client
certificates to allow you to browse to a local address to use the UI
without installing a certificate in your browser::

    kubectl proxy --api-prefix=/ --certificate-authority=ca.pem --client-key=key.pem \
        --client-certificate=cert.pem -s $KUBERNETES_URL

You can then open http://localhost:8001/ui in your browser.

The examples for Docker are similar. With 'cluster-config' set up,
you can just run docker commands without TLS options. To specify the
TLS options manually::

    docker -H tcp://192.168.19.86:2376 --tlsverify \
           --tlscacert ca.pem \
           --tlskey key.pem \
           --tlscert cert.pem \
           info


Storing the certificates
------------------------

Magnum generates and maintains a certificate for each cluster so that it
can also communicate securely with the cluster. As a result, it is
necessary to store the certificates in a secure manner. Magnum
provides the following methods for storing the certificates and this
is configured in /etc/magnum/magnum.conf in the section [certificates]
with the parameter 'cert_manager_type'.

1. Barbican:
   Barbican is a service in OpenStack for storing secrets. It is used
   by Magnum to store the certificates when cert_manager_type is
   configured as::

       cert_manager_type = barbican

   This is the recommended configuration for a production environment.
   Magnum will interface with Barbican to store and retrieve
   certificates, delegating the task of securing the certificates to
   Barbican.

2. Magnum database:
   In some cases, a user may want an alternative to storing the
   certificates that does not require Barbican. This can be a
   development environment, or a private cloud that has been secured
   by other means. Magnum can store the certificates in its own
   database; this is done with the configuration::

       cert_manager_type = x509keypair

   This storage mode is only as secure as the controller server that
   hosts the database for the OpenStack services.

3. Local store:
   As another alternative that does not require Barbican, Magnum can
   simply store the certificates on the local host filesystem where the
   conductor is running, using the configuration::

       cert_manager_type = local

   Note that this mode is only supported when there is a single Magnum
   conductor running since the certificates are stored locally. The
   'local' mode is not recommended for a production environment.

For the nodes, the certificates for communicating with the masters are
stored locally and the nodes are assumed to be secured.


==========
Networking
==========

There are two components that make up the networking in a cluster:

1. The Neutron infrastructure for the cluster: this includes the
   private network, subnet, ports, routers, load balancers, etc.

2. The networking model presented to the containers: this is what the
   containers see in communicating with each other and the external
   world. Typically this consists of a driver deployed on each node.

The two components are deployed and managed separately. The Neutron
infrastructure is the integration with OpenStack; therefore, it
is stable and more or less similar across different COE
types. The networking model, on the other hand, is specific to the
COE type and is still under active development in the various
COE communities, for example,
`Docker libnetwork `_ and
`Kubernetes Container Networking
`_.
As a result, the implementation for the networking models is evolving and
new models are likely to be introduced in the future.

For the Neutron infrastructure, the following configuration can
be set in the ClusterTemplate:

external-network
  The external Neutron network ID to connect to this cluster. This
  is used to connect the cluster to the external internet, allowing
  the nodes in the cluster to access external URLs for discovery, image
  download, etc. If not specified, the default value is "public" and this
  is valid for a typical devstack.

fixed-network
  The Neutron network to use as the private network for the cluster nodes.
  If not specified, a new Neutron private network will be created.

dns-nameserver
  The DNS nameserver to use for this cluster. This is an IP address for
  the server and it is used to configure the Neutron subnet of the
  cluster (dns_nameservers). If not specified, the default DNS is
  8.8.8.8, the publicly available DNS.

http-proxy, https-proxy, no-proxy
  The proxy for the nodes in the cluster, to be used when the cluster is
  behind a firewall and containers cannot access URLs on the external
  internet directly. For the parameters http-proxy and https-proxy, the
  value to provide is a URL and it will be set in the environment
  variables HTTP_PROXY and HTTPS_PROXY respectively on the nodes. For
  the parameter no-proxy, the value to provide is an IP or list of IPs
  separated by commas. Likewise, the value will be set in the
  environment variable NO_PROXY on the nodes.
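
As an illustration, these are all optional parameters to
'cluster-template-create', so a template for a cluster behind a firewall
might look like the following (a minimal sketch; the proxy URL and IPs are
placeholders)::

    magnum cluster-template-create proxied-kubernetes \
                                   --image fedora-atomic-latest \
                                   --keypair testkey \
                                   --external-network public \
                                   --dns-nameserver 8.8.8.8 \
                                   --flavor m1.small \
                                   --coe kubernetes \
                                   --http-proxy http://10.164.177.169:8080 \
                                   --https-proxy http://10.164.177.169:8080 \
                                   --no-proxy 10.0.0.1,10.0.0.2
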

For the networking model to the container, the following configuration
can be set in the ClusterTemplate:

network-driver
  The network driver name for instantiating container networks.
  Currently, the following network drivers are supported:

  +---------+-------------+-----------+-------------+
  | Driver  | Kubernetes  | Swarm     | Mesos       |
  +=========+=============+===========+=============+
  | Flannel | supported   | supported | unsupported |
  +---------+-------------+-----------+-------------+
  | Docker  | unsupported | supported | supported   |
  +---------+-------------+-----------+-------------+

  If not specified, the default driver is Flannel for Kubernetes, and
  Docker for Swarm and Mesos.

A particular network driver may require its own set of parameters for
configuration, and these parameters are specified through the labels
in the ClusterTemplate. Labels are arbitrary key=value pairs.

When Flannel is specified as the network driver, the following
optional labels can be added:

_`flannel_network_cidr`
  IPv4 network in CIDR format to use for the entire Flannel network.
  If not specified, the default is 10.100.0.0/16.

_`flannel_network_subnetlen`
  The size of the subnet allocated to each host. If not specified, the
  default is 24.

_`flannel_backend`
  The type of backend for Flannel. Possible values are *udp, vxlan,
  host-gw*. If not specified, the default is *udp*. Selecting the
  best backend depends on your networking. Generally, *udp* is
  the most widely supported backend since there is little
  requirement on the network, but it typically offers the lowest
  performance. The *vxlan* backend performs better, but requires
  vxlan support in the kernel, so the image used to provision the
  nodes needs to include this support. The *host-gw* backend offers
  the best performance since it does not actually encapsulate
  messages, but it requires all the nodes to be on the same L2
  network. The private Neutron network that Magnum creates does
  meet this requirement; therefore if the parameter *fixed_network*
  is not specified in the ClusterTemplate, *host-gw* is the best
  choice for the Flannel backend.
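
For example, to select the better-performing *vxlan* backend and a larger
container network, the labels can be combined on the command line. A
minimal sketch; the template name and label values are illustrative::

    magnum cluster-template-create flannel-vxlan-template \
                                   --image fedora-atomic-latest \
                                   --keypair testkey \
                                   --external-network public \
                                   --dns-nameserver 8.8.8.8 \
                                   --flavor m1.small \
                                   --coe kubernetes \
                                   --network-driver flannel \
                                   --labels flannel_network_cidr=10.116.0.0/16,flannel_backend=vxlan
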

=================
High Availability
=================

*To be filled in*

=======
Scaling
=======

Performance tuning for periodic task
------------------------------------

Magnum's periodic task performs a `stack-get` operation on the Heat stack
underlying each of its clusters. If you have a large number of clusters this
can create considerable load on the Heat API. To reduce that load you can
configure Magnum to perform one global `stack-list` per periodic task instead
of one per cluster. This is disabled by default, on both the Heat and Magnum
side, since it introduces a security issue: any user in any tenant holding
the `admin` role can perform a global `stack-list` operation if Heat is
configured to allow it for Magnum. If you want to enable it nonetheless,
proceed as follows:

1. Set `periodic_global_stack_list` in magnum.conf to `True`
   (`False` by default).

2. Update the Heat policy to allow Magnum to list stacks. To this end, edit
   your Heat policy file, usually ``etc/heat/policy.json``:

   .. code-block:: ini

      ...
      stacks:global_index: "rule:context_is_admin",

   Now restart Heat.


Containers and nodes
--------------------

Scaling containers and nodes refers to increasing or decreasing
allocated system resources. Scaling is a broad topic and involves
many dimensions. In the context of Magnum in this guide, we consider
the following issues:

- Scaling containers and scaling cluster nodes (infrastructure)
- Manual and automatic scaling

Since this is an active area of development, a complete solution
covering all issues does not exist yet, but partial solutions are
emerging.

Scaling containers involves managing the number of instances of the
container by replicating or deleting instances. This can be used to
respond to changes in the workload being supported by the application;
in this case, it is typically driven by certain metrics relevant to the
application such as response time, etc. Other use cases include
rolling upgrade, where a new version of a service can gradually be
scaled up while the older version is gradually scaled down. Scaling
containers is supported at the COE level and is specific to each COE
as well as the version of the COE. You will need to refer to the
documentation for the proper COE version for full details, but
following are some pointers for reference.

For Kubernetes, pods are scaled manually by setting the count in the
replication controller. Kubernetes version 1.3 and later also
supports `autoscaling
`_.
For Docker, the tool 'Docker Compose' provides the command
`docker-compose scale
`_ which lets you
manually set the number of instances of a container. For Swarm
version 1.12 and later, services can also be scaled manually through
the command `docker service scale
`_.
Automatic scaling for Swarm is not yet available. Mesos manages the
resources and does not support scaling directly; instead, this is
provided by frameworks running within Mesos. With the Marathon
framework currently supported in the Mesos cluster, you can use the
`scale operation
`_
on the Marathon UI or through a REST API call to manually set the
attribute 'instance' for a container.
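
For example, on the secure Kubernetes cluster from the TLS section, a
replication controller can be resized directly with kubectl. A minimal
sketch, assuming 'redis-master' is managed by a replication controller::

    kubectl scale rc redis-master --replicas=3
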

Scaling the cluster nodes involves managing the number of nodes in the
cluster by adding more nodes or removing nodes. There is no direct
correlation between the number of nodes and the number of containers
that can be hosted since the resources consumed (memory, CPU, etc)
depend on the containers. However, if a certain resource is exhausted
in the cluster, adding more nodes would add more resources for hosting
more containers. As part of the infrastructure management, Magnum
supports manual scaling through the attribute 'node_count' in the
cluster, so you can scale the cluster simply by changing this
attribute::

    magnum cluster-update mycluster replace node_count=2

Refer to the section `Scale`_ lifecycle operation for more details.

Adding nodes to a cluster is straightforward: Magnum deploys
additional VMs or baremetal servers through the Heat templates and
invokes the COE-specific mechanism for registering the new nodes to
update the available resources in the cluster. Afterward, it is up to
the COE or user to re-balance the workload by launching new container
instances or re-launching dead instances on the new nodes.

Removing nodes from a cluster requires some more care to ensure
continuous operation of the containers since the nodes being removed
may be actively hosting some containers. Magnum performs a simple
heuristic that is specific to the COE to find the best node candidates
for removal, as follows:

Kubernetes
  Magnum scans the pods in the namespace 'Default' to determine the
  nodes that are *not* hosting any pods (empty nodes). If the number of
  nodes to be removed is equal to or less than the number of these empty
  nodes, these nodes will be removed from the cluster. If the number
  of nodes to be removed is larger than the number of empty nodes, a
  warning message will be sent to the Magnum log and the empty nodes
  along with additional nodes will be removed from the cluster. The
  additional nodes are selected randomly and the pods running on them
  will be deleted without warning. For this reason, a good practice
  is to manage the pods through the replication controller so that the
  deleted pods will be relaunched elsewhere in the cluster. Note also
  that even when only the empty nodes are removed, there is no
  guarantee that no pod will be deleted because there is no locking to
  ensure that Kubernetes will not launch new pods on these nodes after
  Magnum has scanned the pods.

Swarm
  No node selection heuristic is currently supported. If you decrease
  the node_count, a node will be chosen by Magnum without
  consideration of what containers are running on the selected node.

Mesos
  Magnum scans the running tasks on the Marathon server to determine the
  nodes on which there is *no* task running (empty nodes). If the
  number of nodes to be removed is equal to or less than the number of
  these empty nodes, these nodes will be removed from the cluster.
  If the number of nodes to be removed is larger than the number of
  empty nodes, a warning message will be sent to the Magnum log and
  the empty nodes along with additional nodes will be removed from the
  cluster. The additional nodes are selected randomly and the containers
  running on them will be deleted without warning. Note that even when
  only the empty nodes are removed, there is no guarantee that no
  container will be deleted because there is no locking to ensure that
  Mesos will not launch new containers on these nodes after Magnum
  has scanned the tasks.

Currently, scaling containers and scaling cluster nodes are handled
separately, but in many use cases, there are interactions between the
two operations. For instance, scaling up the containers may exhaust
the available resources in the cluster, thereby requiring scaling up
the cluster nodes as well. Many complex issues are involved in
managing this interaction. A presentation at the OpenStack Tokyo
Summit 2015 covered some of these issues along with some early
proposals, `Exploring Magnum and Senlin integration for autoscaling
containers
`_.
This remains an active area of discussion and research.


=======
Storage
=======

Currently Cinder provides the block storage to the containers, and the
storage is made available in two ways: as ephemeral storage and as
persistent storage.

Ephemeral storage
-----------------

The filesystem for the container consists of multiple layers from the
image and a top layer that holds the modifications made by the
container. This top layer requires storage space and the storage is
configured in the Docker daemon through a number of storage options.
When the container is removed, the storage allocated to the particular
container is also deleted.

Magnum can manage the containers' filesystem in two ways: storing it
on the local disk of the compute instances, or creating a separate
Cinder block volume for each node in the cluster, mounting it to the
node and configuring it to be used as ephemeral storage. Users can
specify the size of the Cinder volume with the ClusterTemplate
attribute 'docker-volume-size'.
Currently the block size is fixed at cluster creation time, but future
lifecycle operations may allow modifying the block size during the life
of the cluster.

_`docker_volume_type`
  For drivers that support additional volumes for container storage, a
  label named 'docker_volume_type' is exposed so that users can select
  different Cinder volume types for their volumes. The default volume
  *must* be set in 'default_docker_volume_type' in the 'cinder' section
  of magnum.conf; an obvious value is the default volume type set in
  cinder.conf of your Cinder deployment. Please note that
  docker_volume_type refers to a Cinder volume type and it is unrelated
  to Docker or Kubernetes volumes.

Both the local disk and Cinder block storage can be used with a number
of available Docker storage drivers.

* 'devicemapper': When used with a dedicated Cinder volume, it is
  configured using direct-lvm and offers very good performance. If it is
  used with the compute instance's local disk, it uses a loopback device,
  offering poor performance, and it is not recommended for production
  environments. Using the 'devicemapper' driver does allow the use of
  SELinux.

* 'overlay': When used with a dedicated Cinder volume, it offers
  performance as good as or better than devicemapper. If used on the
  local disk of the compute instance (especially with high IOPS drives)
  you can get significant performance gains. However, for kernel versions
  less than 4.9, SELinux must be disabled inside the containers, resulting
  in worse container isolation, although it still runs in enforcing mode
  on the cluster compute instances.

Persistent storage
------------------

In some use cases, data read/written by a container needs to persist
so that it can be accessed later. To persist the data, a Cinder
volume with a filesystem on it can be mounted on a host and be made
available to the container, then be unmounted when the container exits.

Docker provides the 'volume' feature for this purpose: the user
invokes the 'volume create' command, specifying a particular volume
driver to perform the actual work. Then this volume can be mounted
when a container is created. A number of third-party volume drivers
support OpenStack Cinder as the backend, for example Rexray and
Flocker. Magnum currently supports Rexray as the volume driver for
Swarm and Mesos. Other drivers are being considered.

Kubernetes allows a previously created Cinder volume to be mounted to
a pod and this is done by specifying the volume ID in the pod YAML file.
When the pod is scheduled on a node, Kubernetes will interface with
Cinder to request the volume to be mounted on this node, then
Kubernetes will launch the Docker container with the proper options to
make the filesystem on the Cinder volume accessible to the container
in the pod. When the pod exits, Kubernetes will again send a request
to Cinder to unmount the volume's filesystem, making it available to be
mounted on other nodes.
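
For example, on a Swarm or Mesos cluster deployed with the 'rexray'
volume driver, the standard Docker volume commands described above apply.
A minimal sketch; the volume name 'mydata' is illustrative::

    docker volume create --driver rexray --name mydata
    docker run -d -v mydata:/data redis
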

Magnum supports these features to use Cinder as persistent storage
using the ClusterTemplate attribute 'volume-driver'. The support matrix
for the COE types is summarized as follows:

+--------+-------------+-------------+-------------+
| Driver | Kubernetes  | Swarm       | Mesos       |
+========+=============+=============+=============+
| cinder | supported   | unsupported | unsupported |
+--------+-------------+-------------+-------------+
| rexray | unsupported | supported   | supported   |
+--------+-------------+-------------+-------------+

Following are some examples for using Cinder as persistent storage.

Using Cinder in Kubernetes
++++++++++++++++++++++++++

**NOTE:** This feature requires Kubernetes version 1.5.0 or above.
The public Fedora image from Atomic currently meets this requirement.

1. Create the ClusterTemplate.

   Specify 'cinder' as the volume-driver for Kubernetes::

       magnum cluster-template-create k8s-cluster-template \
                                      --image fedora-23-atomic-7 \
                                      --keypair testkey \
                                      --external-network public \
                                      --dns-nameserver 8.8.8.8 \
                                      --flavor m1.small \
                                      --docker-volume-size 5 \
                                      --network-driver flannel \
                                      --coe kubernetes \
                                      --volume-driver cinder

2. Create the cluster::

       magnum cluster-create k8s-cluster \
                             --cluster-template k8s-cluster-template \
                             --node-count 1

Kubernetes is now ready to use Cinder for persistent storage.
Following is an example illustrating how Cinder is used in a pod.

1. Create the Cinder volume::

       cinder create --display-name=test-repo 1

       ID=$(cinder create --display-name=test-repo 1 | awk -F'|' '$2~/^[[:space:]]*id/ {print $3}')

   The command will generate the volume with an ID. The volume ID will be
   specified in Step 2.

2. Create a pod in this cluster and mount this Cinder volume to the pod.
   Create a file (e.g. nginx-cinder.yaml) describing the pod::

       cat > nginx-cinder.yaml << END
       apiVersion: v1
       kind: Pod
       metadata:
         name: aws-web
       spec:
         containers:
           - name: web
             image: nginx
             ports:
               - name: web
                 containerPort: 80
                 hostPort: 8081
                 protocol: TCP
             volumeMounts:
               - name: html-volume
                 mountPath: "/usr/share/nginx/html"
         volumes:
           - name: html-volume
             cinder:
               # Enter the volume ID below
               volumeID: $ID
               fsType: ext4
       END

   **NOTE:** The Cinder volume ID needs to be configured in the YAML file
   so the existing Cinder volume can be mounted in a pod by specifying
   the volume ID in the pod manifest as follows::

       volumes:
         - name: html-volume
           cinder:
             volumeID: $ID
             fsType: ext4

3. Create the pod by the normal Kubernetes interface::

       kubectl create -f nginx-cinder.yaml

You can start a shell in the container to check that the mountPath exists,
and on an OpenStack client you can run the command 'cinder list' to verify
that the Cinder volume status is 'in-use'.


Using Cinder in Swarm
+++++++++++++++++++++

*To be filled in*


Using Cinder in Mesos
+++++++++++++++++++++

1. Create the ClusterTemplate.

   Specify 'rexray' as the volume-driver for Mesos. As an option, you
   can specify in a label the attribute 'rexray_preempt' to enable
   any host to take control of a volume regardless of whether other
   hosts are using the volume.
   If this is set to false, the driver will ensure data safety by
   locking the volume::

       magnum cluster-template-create mesos-cluster-template \
                                      --image ubuntu-mesos \
                                      --keypair testkey \
                                      --external-network public \
                                      --dns-nameserver 8.8.8.8 \
                                      --master-flavor m1.magnum \
                                      --docker-volume-size 4 \
                                      --tls-disabled \
                                      --flavor m1.magnum \
                                      --coe mesos \
                                      --volume-driver rexray \
                                      --labels rexray_preempt=true

2. Create the Mesos cluster::

       magnum cluster-create mesos-cluster \
                             --cluster-template mesos-cluster-template \
                             --node-count 1

3. Create the Cinder volume and configure this cluster::

       cinder create --display-name=redisdata 1

   Create the following file::

       cat > mesos.json << END
       {
         "id": "redis",
         "container": {
           "docker": {
             "image": "redis",
             "network": "BRIDGE",
             "portMappings": [
               { "containerPort": 80, "hostPort": 0, "protocol": "tcp"}
             ],
             "parameters": [
               { "key": "volume-driver", "value": "rexray" },
               { "key": "volume", "value": "redisdata:/data" }
             ]
           }
         },
         "cpus": 0.2,
         "mem": 32.0,
         "instances": 1
       }
       END

   **NOTE:** When the Mesos cluster is created using this ClusterTemplate,
   the Mesos cluster will be configured so that a filesystem on an existing
   Cinder volume can be mounted in a container by configuring the
   parameters to mount the Cinder volume in the JSON file::

       "parameters": [
         { "key": "volume-driver", "value": "rexray" },
         { "key": "volume", "value": "redisdata:/data" }
       ]

4. Create the container using the Marathon REST API::

       MASTER_IP=$(magnum cluster-show mesos-cluster | awk '/ api_address /{print $4}')
       curl -X POST -H "Content-Type: application/json" \
           http://${MASTER_IP}:8080/v2/apps -d@mesos.json

You can log into the container to check that the mountPath exists, and
you can run the command 'cinder list' to verify that your Cinder
volume status is 'in-use'.


================
Image Management
================

When a COE is deployed, an image from Glance is used to boot the nodes
in the cluster and then the software will be configured and started on
the nodes to bring up the full cluster. An image is based on a
particular distro such as Fedora, Ubuntu, etc, and is prebuilt with
the software specific to the COE such as Kubernetes, Swarm, Mesos.
The image is tightly coupled with the following in Magnum:

1. Heat templates to orchestrate the configuration.

2. Template definition to map ClusterTemplate parameters to Heat
   template parameters.

3. Set of scripts to configure software.

Collectively, they constitute the driver for a particular COE and a
particular distro; therefore, developing a new image needs to be done
in conjunction with developing these other components. An image can be
built by various methods such as diskimage-builder, or in some cases, a
distro image can be used directly. A number of drivers and the
associated images are supported in Magnum as reference implementations.
In this section, we focus mainly on the supported images.

All images must include support for cloud-init and the heat software
configuration utility:

- os-collect-config
- os-refresh-config
- os-apply-config
- heat-config
- heat-config-script

Additional software is described as follows.

Kubernetes on Fedora Atomic
---------------------------

This image can be downloaded from the `public Atomic site
`_
or can be built locally using diskimage-builder.
Details can be found in the `fedora-atomic element
`_.
The image currently has the following OS/software:

+-------------+-----------+
| OS/software | version   |
+=============+===========+
| Fedora      | 25        |
+-------------+-----------+
| Docker      | 1.12.6    |
+-------------+-----------+
| Kubernetes  | 1.5.3     |
+-------------+-----------+
| etcd        | 3.1.3     |
+-------------+-----------+
| Flannel     | 0.7.0     |
+-------------+-----------+

The following software is managed as systemd services:

- kube-apiserver
- kubelet
- etcd
- flannel (if specified as network driver)
- docker

The following software is managed as Docker containers:

- kube-controller-manager
- kube-scheduler
- kube-proxy

The login for this image is *fedora*.

Kubernetes on CoreOS
--------------------

CoreOS publishes a `stock image
`_
that is being used to deploy Kubernetes.
This image has the following OS/software:

+-------------+-----------+
| OS/software | version   |
+=============+===========+
| CoreOS      | 4.3.6     |
+-------------+-----------+
| Docker      | 1.9.1     |
+-------------+-----------+
| Kubernetes  | 1.0.6     |
+-------------+-----------+
| etcd        | 2.2.3     |
+-------------+-----------+
| Flannel     | 0.5.5     |
+-------------+-----------+

The following software is managed as systemd services:

- kubelet
- flannel (if specified as network driver)
- docker
- etcd

The following software is managed as Docker containers:

- kube-apiserver
- kube-controller-manager
- kube-scheduler
- kube-proxy

The login for this image is *core*.

Kubernetes on Ironic
--------------------

This image is built manually using diskimage-builder. The scripts and
instructions are included in the `Magnum code repo
`_.
Currently Ironic is not yet fully supported; more details will be
provided when this driver has been fully tested.


Swarm on Fedora Atomic
----------------------

This image is the same as the image for `Kubernetes on Fedora Atomic`_
described above. The login for this image is *fedora*.

Mesos on Ubuntu
---------------

This image is built manually using diskimage-builder. The instructions are
provided in the section `Diskimage-builder`_.
The Fedora site hosts the current image `ubuntu-mesos-latest.qcow2
`_.

+-------------+-----------+
| OS/software | version   |
+=============+===========+
| Ubuntu      | 14.04     |
+-------------+-----------+
| Docker      | 1.8.1     |
+-------------+-----------+
| Mesos       | 0.25.0    |
+-------------+-----------+
| Marathon    | 0.11.1    |
+-------------+-----------+

============
Notification
============

Magnum provides notifications about usage data so that 3rd party applications
can use the data for auditing, billing, monitoring, or quota purposes. This
document describes the current inclusions and exclusions for Magnum
notifications.

Magnum uses Cloud Auditing Data Federation (`CADF`_) Notification as its
notification format for better support of auditing; details about CADF are
documented below.

Auditing with CADF
------------------

Magnum uses the `PyCADF`_ library to emit CADF notifications; these events
adhere to the DMTF `CADF`_ specification. This standard provides auditing
capabilities for compliance with security, operational, and business processes
and supports normalized and categorized event data for federation and
aggregation.

.. _PyCADF: http://docs.openstack.org/developer/pycadf
.. _CADF: http://www.dmtf.org/standards/cadf

The table below describes the event model components and the semantics of
each component:

+-----------------+----------------------------------------------------------+
| model component | CADF Definition                                          |
+=================+==========================================================+
| OBSERVER        | The RESOURCE that generates the CADF Event Record based  |
|                 | on its observation (directly or indirectly) of the       |
|                 | Actual Event.                                            |
+-----------------+----------------------------------------------------------+
| INITIATOR       | The RESOURCE that initiated, originated, or instigated   |
|                 | the event's ACTION, according to the OBSERVER.           |
+-----------------+----------------------------------------------------------+
| ACTION          | The operation or activity the INITIATOR has performed,   |
|                 | has attempted to perform or has pending against the      |
|                 | event's TARGET, according to the OBSERVER.               |
+-----------------+----------------------------------------------------------+
| TARGET          | The RESOURCE against which the ACTION of a CADF Event    |
|                 | Record was performed, attempted, or is pending,          |
|                 | according to the OBSERVER.                               |
+-----------------+----------------------------------------------------------+
| OUTCOME         | The result or status of the ACTION against the TARGET,   |
|                 | according to the OBSERVER.                               |
+-----------------+----------------------------------------------------------+

The ``payload`` portion of a CADF Notification is a CADF ``event``, which
is represented as a JSON dictionary. For example:

.. code-block:: javascript

    {
        "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
        "initiator": {
            "typeURI": "service/security/account/user",
            "host": {
                "agent": "curl/7.22.0(x86_64-pc-linux-gnu)",
                "address": "127.0.0.1"
            },
            "id": "<user_id>"
        },
        "target": {
            "typeURI": "<target_uri>",
            "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15"
        },
        "observer": {
            "typeURI": "service/security",
            "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a"
        },
        "eventType": "activity",
        "eventTime": "2014-02-14T01:20:47.932842+00:00",
        "action": "<action>",
        "outcome": "success",
        "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f"
    }

Where the following are defined:

* ``<user_id>``: ID of the user that performed the operation
* ``<target_uri>``: CADF specific target URI, (i.e.: data/security/project)
* ``<action>``: The action being performed, typically:
  ``<operation>``. ``<resource_type>``

Additionally there may be extra keys present depending on the operation being
performed; these will be discussed below.

Note, the ``eventType`` property of the CADF payload is different from the
``event_type`` property of a notification. The former (``eventType``) is a
CADF keyword which designates the type of event that is being measured; this
can be: `activity`, `monitor` or `control`. The latter
(``event_type``) is described in previous sections as:
`magnum.<resource_type>.<operation>`

Supported Events
----------------

The following table displays the corresponding relationship between resource
types and operations. The bay type is deprecated and will be removed in a
future version; cluster is the new equivalent term.

+---------------+----------------------------+-------------------------+
| resource type | supported operations       | typeURI                 |
+===============+============================+=========================+
| bay           | create, update, delete     | service/magnum/bay      |
+---------------+----------------------------+-------------------------+
| cluster       | create, update, delete     | service/magnum/cluster  |
+---------------+----------------------------+-------------------------+

Example Notification - Cluster Create
-------------------------------------

The following is an example of a notification that is sent when a cluster is
created. This example can be applied to any ``create``, ``update`` or
``delete`` event that is seen in the table above. The ``event_type`` and
``typeURI`` fields will change accordingly.

.. code-block:: javascript

    {
        "event_type": "magnum.cluster.created",
        "message_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69",
        "payload": {
            "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
            "initiator": {
                "typeURI": "service/security/account/user",
                "id": "c9f76d3c31e142af9291de2935bde98a",
                "user_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69",
                "project_id": "3d4a50a9-2b59-438b-bf19-c231f9c7625a"
            },
            "target": {
                "typeURI": "service/magnum/cluster",
                "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15"
            },
            "observer": {
                "typeURI": "service/magnum/cluster",
                "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a"
            },
            "eventType": "activity",
            "eventTime": "2015-05-20T01:20:47.932842+00:00",
            "action": "create",
            "outcome": "success",
            "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f",
            "resource_info": "671da331c47d4e29bb6ea1d270154ec3"
        },
        "priority": "INFO",
        "publisher_id": "magnum.host1234",
        "timestamp": "2016-05-20 15:03:45.960280"
    }


====================
Container Monitoring
====================

The offered monitoring stack relies on the following set of containers and
services:

- cAdvisor
- Node Exporter
- Prometheus
- Grafana

To set up this monitoring stack, users are given two configurable labels in
the Magnum cluster template's definition:

_`prometheus_monitoring`
  This label accepts a boolean value. If *True*, the monitoring stack will
  be set up. By default *prometheus_monitoring = False*.

_`grafana_admin_passwd`
  This label lets users create their own *admin* user password for the
  Grafana interface. It expects a string value. By default it is set to
  *admin*.


Container Monitoring in Kubernetes
----------------------------------

By default, all Kubernetes clusters already contain *cAdvisor* integrated
with the *Kubelet* binary. Its container monitoring data can be accessed on
a node level basis through *http://NODE_IP:4194*.

Node Exporter is part of the above mentioned monitoring stack as it can be
used to export machine metrics. Such functionality also works at the node
level, which means that when `prometheus_monitoring`_ is *True*, the
Kubernetes nodes will be populated with an additional manifest under
*/etc/kubernetes/manifests*. Node Exporter is then automatically picked up
and launched as a regular Kubernetes pod.

To aggregate and complement all the existing monitoring metrics and add a
built-in visualization layer, Prometheus is used. It is launched by the
Kubernetes master node(s) as a *Service* within a *Deployment* with one
replica and it relies on a *ConfigMap* where the Prometheus configuration
(prometheus.yml) is defined.
This configuration uses Prometheus's native
support for service discovery in Kubernetes clusters,
*kubernetes_sd_configs*. The respective manifests can be found in
*/srv/kubernetes/monitoring/* on the master nodes and once the service is
up and running, the Prometheus UI can be accessed through port 9090.

Finally, for custom plotting and enhanced metric aggregation and
visualization, Prometheus can be integrated with Grafana, as it provides
native support for Prometheus data sources. Grafana is also deployed as
a *Service* within a *Deployment* with one replica. The default user is
*admin* and the password is set according to `grafana_admin_passwd`_.
There is also a default Grafana dashboard provided with this installation,
from the official `Grafana dashboards' repository
`_. The Prometheus data
source is automatically added to Grafana once it is up and running, pointing
to *http://prometheus:9090* through *Proxy*. The respective manifests can
also be found in */srv/kubernetes/monitoring/* on the master nodes and once
the service is running, the Grafana dashboards can be accessed through port
3000.

For both Prometheus and Grafana, there is an assigned *systemd* service
called *kube-enable-monitoring*.

=================================
Kubernetes External Load Balancer
=================================

In a Kubernetes cluster, all masters and minions are connected to a private
Neutron subnet, which in turn is connected by a router to the public network.
This allows the nodes to access each other and the external internet.

All Kubernetes pods and services created in the cluster are connected to a
private container network which by default is Flannel, an overlay network
that runs on top of the Neutron private subnet. The pods and services are
assigned IP addresses from this container network and they can access each
other and the external internet. However, these IP addresses are not
accessible from an external network.

To publish a service endpoint externally so that the service can be accessed
from the external network, Kubernetes provides the external load balancer
feature. This is done by simply specifying the attribute "type: LoadBalancer"
in the service manifest. When the service is created, Kubernetes will add an
external load balancer in front of the service so that the service will have
an external IP address in addition to the internal IP address on the
container network. The service endpoint can then be accessed with this
external IP address. Refer to the `Kubernetes service document `_ for more
details.

A Kubernetes cluster deployed by Magnum will have all the necessary
configuration required for the external load balancer. This document
describes how to use this feature.

Steps for the cluster administrator
-----------------------------------

Because the Kubernetes master needs to interface with OpenStack to create
and manage the Neutron load balancer, we need to provide a credential for
Kubernetes to use.

In the current implementation, the cluster administrator needs to manually
perform this step. We are looking into several ways to let Magnum automate
this step in a secure manner.
This means that after the Kubernetes cluster is
initially deployed, the load balancer support is disabled. If the
administrator does not want to enable this feature, no further action is
required. All the services will be created normally; services that specify
the load balancer will also be created successfully, but a load balancer
will not be created.

Note that different versions of Kubernetes require different versions of the
Neutron LBaaS plugin running on the OpenStack instance::

    ============================  ==============================
    Kubernetes Version on Master  Neutron LBaaS Version Required
    ============================  ==============================
    1.2                           LBaaS v1
    1.3 or later                  LBaaS v2
    ============================  ==============================

Before enabling the Kubernetes load balancer feature, confirm that the
OpenStack instance is running the required version of the Neutron LBaaS
plugin. To determine if your OpenStack instance is running LBaaS v1, try
running the following command from your OpenStack control node::

    neutron lb-pool-list

Or look for the following configuration in neutron.conf or
neutron_lbaas.conf::

    service_provider = LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default

To determine if your OpenStack instance is running LBaaS v2, try running
the following command from your OpenStack control node::

    neutron lbaas-pool-list

Or look for the following configuration in neutron.conf or
neutron_lbaas.conf::

    service_plugins = neutron.plugins.services.agent_loadbalancer.plugin.LoadBalancerPluginv2

To configure LBaaS v1 or v2, refer to the Neutron documentation.

Before deleting the Kubernetes cluster, make sure to
delete all the services that created load balancers. Because the Neutron
objects created by Kubernetes are not managed by Heat, they will not be
deleted by Heat and this will cause the cluster-delete operation to fail. If
this occurs, delete the Neutron objects manually (lb-pool, lb-vip, lb-member,
lb-healthmonitor) and then run cluster-delete again.

Steps for the users
-------------------

This feature requires the OpenStack cloud provider to be enabled.
To do so, enable the cinder support (--volume-driver cinder).

For the user, publishing the service endpoint externally involves the
following 2 steps:

1. Specify "type: LoadBalancer" in the service manifest.
2. After the service is created, associate a floating IP with the VIP of
   the load balancer pool.

The following example illustrates how to create an external endpoint for
a pod running nginx.

Create a file (e.g. nginx.yaml) describing a pod running nginx::

    apiVersion: v1
    kind: Pod
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80

Create a file (e.g. nginx-service.yaml) describing a service for the nginx
pod::

    apiVersion: v1
    kind: Service
    metadata:
      name: nginxservice
      labels:
        app: nginx
    spec:
      ports:
        - port: 80
          targetPort: 80
          protocol: TCP
      selector:
        app: nginx
      type: LoadBalancer

Please refer to the `quickstart `_ guide on how to connect to Kubernetes
running on the launched cluster.
Assuming a Kubernetes cluster named k8sclusterv1 has been created,
-deploy the pod and service using the following commands::
-
-    kubectl create -f nginx.yaml
-
-    kubectl create -f nginx-service.yaml
-
-For more details on verifying the load balancer in OpenStack, refer to the
-following section on how it works.
-
-Next, associate a floating IP to the load balancer. This can be done easily
-on Horizon by navigating to::
-
-    Compute -> Access & Security -> Floating IPs
-
-Click on "Allocate IP To Project" and then on "Associate" for the new floating
-IP.
-
-Alternatively, associating a floating IP can be done on the command line by
-allocating a floating IP, finding the port of the VIP, and associating the
-floating IP to the port.
-The commands shown below are for illustration purposes and assume
-that there is only one service with a load balancer running in the cluster and
-no other load balancers exist except for those created for the cluster.
-
-First create a floating IP on the public network::
-
-    neutron floatingip-create public
-
-    Created a new floatingip:
-
-    +---------------------+--------------------------------------+
-    | Field               | Value                                |
-    +---------------------+--------------------------------------+
-    | fixed_ip_address    |                                      |
-    | floating_ip_address | 172.24.4.78                          |
-    | floating_network_id | 4808eacb-e1a0-40aa-97b6-ecb745af2a4d |
-    | id                  | b170eb7a-41d0-4c00-9207-18ad1c30fecf |
-    | port_id             |                                      |
-    | router_id           |                                      |
-    | status              | DOWN                                 |
-    | tenant_id           | 012722667dc64de6bf161556f49b8a62     |
-    +---------------------+--------------------------------------+
-
-Note the floating IP 172.24.4.78 that has been allocated. The ID for this
-floating IP is shown above, but it can also be queried by::
-
-    FLOATING_ID=$(neutron floatingip-list | grep "172.24.4.78" | awk '{print $2}')
-
-Next find the VIP for the load balancer::
-
-    VIP_ID=$(neutron lb-vip-list | grep TCP | grep -v pool | awk '{print $2}')
-
-Find the port for this VIP::
-
-    PORT_ID=$(neutron lb-vip-show $VIP_ID | grep port_id | awk '{print $4}')
-
-Finally associate the floating IP with the port of the VIP::
-
-    neutron floatingip-associate $FLOATING_ID $PORT_ID
-
-The endpoint for nginx can now be accessed on a browser at this floating IP::
-
-    http://172.24.4.78:80
-
-Alternatively, you can check for the nginx 'welcome' message by::
-
-    curl http://172.24.4.78:80
-
-NOTE: it is not necessary to specify port 80 here, but it is shown to
-correlate with the port that was specified in the service manifest.
-
-How it works
-------------
-
-Kubernetes is designed to work with different clouds such as Google Compute
-Engine (GCE), Amazon Web Services (AWS), and OpenStack; therefore, different
-load balancers need to be created on the particular cloud for the services.
-This is done through a plugin for each cloud; the OpenStack plugin was
-developed by Angus Lees::
-
-    https://github.com/kubernetes/kubernetes/blob/release-1.0/pkg/cloudprovider/openstack/openstack.go
-
-When the Kubernetes components kube-apiserver and kube-controller-manager
-start up, they will use the credential provided to authenticate a client
-to interface with OpenStack.
-
-When a service with a load balancer is created, the plugin code will interface
-with Neutron in this sequence:
-
-1. Create lb-pool for the Kubernetes service
-2. Create lb-member for the minions
-3. Create lb-healthmonitor
-4. Create lb-vip on the private network of the Kubernetes cluster
-
-These Neutron objects can be verified as follows.
For the load balancer pool::
-
-    neutron lb-pool-list
-    +--------------------------------------+--------------------------------------------------+----------+-------------+----------+----------------+--------+
-    | id                                   | name                                             | provider | lb_method   | protocol | admin_state_up | status |
-    +--------------------------------------+--------------------------------------------------+----------+-------------+----------+----------------+--------+
-    | 241357b3-2a8f-442e-b534-bde7cd6ba7e4 | a1f03e40f634011e59c9efa163eae8ab                 | haproxy  | ROUND_ROBIN | TCP      | True           | ACTIVE |
-    | 82b39251-1455-4eb6-a81e-802b54c2df29 | k8sclusterv1-iypacicrskib-api_pool-fydshw7uvr7h  | haproxy  | ROUND_ROBIN | HTTP     | True           | ACTIVE |
-    | e59ea983-c6e8-4cec-975d-89ade6b59e50 | k8sclusterv1-iypacicrskib-etcd_pool-qbpo43ew2m3x | haproxy  | ROUND_ROBIN | HTTP     | True           | ACTIVE |
-    +--------------------------------------+--------------------------------------------------+----------+-------------+----------+----------------+--------+
-
-Note that 2 load balancers already exist to implement high availability for the
-cluster (api and etcd). The new load balancer for the Kubernetes service uses
-the TCP protocol and has a name assigned by Kubernetes.
-
-For the members of the pool::
-
-    neutron lb-member-list
-    +--------------------------------------+----------+---------------+--------+----------------+--------+
-    | id                                   | address  | protocol_port | weight | admin_state_up | status |
-    +--------------------------------------+----------+---------------+--------+----------------+--------+
-    | 9ab7dcd7-6e10-4d9f-ba66-861f4d4d627c | 10.0.0.5 | 8080          | 1      | True           | ACTIVE |
-    | b179c1ad-456d-44b2-bf83-9cdc127c2b27 | 10.0.0.5 | 2379          | 1      | True           | ACTIVE |
-    | f222b60e-e4a9-4767-bc44-ffa66ec22afe | 10.0.0.6 | 31157         | 1      | True           | ACTIVE |
-    +--------------------------------------+----------+---------------+--------+----------------+--------+
-
-Again, 2 members already exist for high availability and they serve the master
-node at 10.0.0.5. The new member serves the minion at 10.0.0.6, which hosts the
-Kubernetes service.
-
-For the monitor of the pool::
-
-    neutron lb-healthmonitor-list
-    +--------------------------------------+------+----------------+
-    | id                                   | type | admin_state_up |
-    +--------------------------------------+------+----------------+
-    | 381d3d35-7912-40da-9dc9-b2322d5dda47 | TCP  | True           |
-    | 67f2ae8f-ffc6-4f86-ba5f-1a135f4af85c | TCP  | True           |
-    | d55ff0f3-9149-44e7-9b52-2e055c27d1d3 | TCP  | True           |
-    +--------------------------------------+------+----------------+
-
-For the VIP of the pool::
-
-    neutron lb-vip-list
-    +--------------------------------------+----------------------------------+----------+----------+----------------+--------+
-    | id                                   | name                             | address  | protocol | admin_state_up | status |
-    +--------------------------------------+----------------------------------+----------+----------+----------------+--------+
-    | 9ae2ebfb-b409-4167-9583-4a3588d2ff42 | api_pool.vip                     | 10.0.0.3 | HTTP     | True           | ACTIVE |
-    | c318aec6-8b7b-485c-a419-1285a7561152 | a1f03e40f634011e59c9efa163eae8ab | 10.0.0.7 | TCP      | True           | ACTIVE |
-    | fc62cf40-46ad-47bd-aa1e-48339b95b011 | etcd_pool.vip                    | 10.0.0.4 | HTTP     | True           | ACTIVE |
-    +--------------------------------------+----------------------------------+----------+----------+----------------+--------+
-
-Note that the VIP is created on the private network of the cluster; therefore
-it has an internal IP address of 10.0.0.7.
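-If a floating IP was associated earlier, you can also confirm that it is now
-bound to the VIP's port, reusing the floating IP and VIP from the examples
-above::
-
-    neutron floatingip-list | grep "172.24.4.78"
-
-The ``fixed_ip_address`` column should now show the VIP's internal address,
-10.0.0.7.
-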
This address is also associated as
-the "external address" of the Kubernetes service. You can verify this in
-Kubernetes by running the following command::
-
-    kubectl get services
-    NAME           LABELS                                    SELECTOR    IP(S)            PORT(S)
-    kubernetes     component=apiserver,provider=kubernetes               10.254.0.1       443/TCP
-    nginxservice   app=nginx                                 app=nginx   10.254.122.191   80/TCP
-                                                                         10.0.0.7
-
-On GCE, the networking implementation gives the load balancer an external
-address automatically. On OpenStack, we need to take the additional step of
-associating a floating IP to the load balancer.
-
diff --git a/etc/magnum/README-magnum.conf.txt b/etc/magnum/README-magnum.conf.txt
deleted file mode 100644
index 893453b9..00000000
--- a/etc/magnum/README-magnum.conf.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-To generate the sample magnum.conf file, run the following
-command from the top level of the magnum directory:
-
-tox -egenconfig
-
diff --git a/etc/magnum/api-paste.ini b/etc/magnum/api-paste.ini
deleted file mode 100644
index d1f56fb7..00000000
--- a/etc/magnum/api-paste.ini
+++ /dev/null
@@ -1,28 +0,0 @@
-[pipeline:main]
-pipeline = cors healthcheck http_proxy_to_wsgi request_id osprofiler authtoken api_v1
-
-[app:api_v1]
-paste.app_factory = magnum.api.app:app_factory
-
-[filter:authtoken]
-acl_public_routes = /, /v1
-paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory
-
-[filter:osprofiler]
-paste.filter_factory = magnum.common.profiler:WsgiMiddleware.factory
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = magnum
-
-[filter:healthcheck]
-paste.filter_factory = oslo_middleware:Healthcheck.factory
-backends = disable_by_file
-disable_by_file_path = /etc/magnum/healthcheck_disable
-
-[filter:http_proxy_to_wsgi]
-paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
-oslo_config_project = magnum
diff --git a/etc/magnum/magnum-config-generator.conf b/etc/magnum/magnum-config-generator.conf
deleted file mode 100644
index 23dc094e..00000000
--- a/etc/magnum/magnum-config-generator.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-[DEFAULT]
-output_file = etc/magnum/magnum.conf.sample
-wrap_width = 79
-
-namespace = magnum.conf
-namespace = oslo.concurrency
-namespace = oslo.db
-namespace = oslo.log
-namespace = oslo.messaging
-namespace = oslo.middleware.cors
-namespace = oslo.policy
-namespace = oslo.service.periodic_task
-namespace = oslo.service.service
-namespace = keystonemiddleware.auth_token
diff --git a/etc/magnum/policy.json b/etc/magnum/policy.json
deleted file mode 100644
index cb19ad74..00000000
--- a/etc/magnum/policy.json
+++ /dev/null
@@ -1,52 +0,0 @@
-{
-    "context_is_admin": "role:admin",
-    "admin_or_owner": "is_admin:True or project_id:%(project_id)s",
-    "default": "rule:admin_or_owner",
-    "admin_api": "rule:context_is_admin",
-    "admin_or_user": "is_admin:True or user_id:%(user_id)s",
-    "cluster_user": "user_id:%(trustee_user_id)s",
-    "deny_cluster_user": "not domain_id:%(trustee_domain_id)s",
-
-    "bay:create": "rule:deny_cluster_user",
-    "bay:delete": "rule:deny_cluster_user",
-    "bay:detail": "rule:deny_cluster_user",
-    "bay:get": "rule:deny_cluster_user",
-    "bay:get_all": "rule:deny_cluster_user",
-    "bay:update": "rule:deny_cluster_user",
-
-    "baymodel:create": "rule:deny_cluster_user",
-    "baymodel:delete": "rule:deny_cluster_user",
-    "baymodel:detail": "rule:deny_cluster_user",
-    "baymodel:get": "rule:deny_cluster_user",
-    "baymodel:get_all": "rule:deny_cluster_user",
"rule:deny_cluster_user", - "baymodel:update": "rule:deny_cluster_user", - "baymodel:publish": "rule:admin_api", - - "cluster:create": "rule:deny_cluster_user", - "cluster:delete": "rule:deny_cluster_user", - "cluster:detail": "rule:deny_cluster_user", - "cluster:get": "rule:deny_cluster_user", - "cluster:get_all": "rule:deny_cluster_user", - "cluster:update": "rule:deny_cluster_user", - - "clustertemplate:create": "rule:deny_cluster_user", - "clustertemplate:delete": "rule:deny_cluster_user", - "clustertemplate:detail": "rule:deny_cluster_user", - "clustertemplate:get": "rule:deny_cluster_user", - "clustertemplate:get_all": "rule:deny_cluster_user", - "clustertemplate:update": "rule:deny_cluster_user", - "clustertemplate:publish": "rule:admin_api", - - "quotas:get": "rule:default", - "quotas:get_all": "rule:admin_api", - "quotas:create": "rule:admin_api", - "quotas:update": "rule:admin_api", - "quotas:delete": "rule:admin_api", - - "certificate:rotate_ca": "rule:admin_or_owner", - "certificate:create": "rule:admin_or_user or rule:cluster_user", - "certificate:get": "rule:admin_or_user or rule:cluster_user", - - "magnum-service:get_all": "rule:admin_api", - "stats:get_all": "rule:admin_or_owner" -} diff --git a/functional_creds.conf.sample b/functional_creds.conf.sample deleted file mode 100644 index c25ac449..00000000 --- a/functional_creds.conf.sample +++ /dev/null @@ -1,23 +0,0 @@ -# Credentials for functional testing -[auth] -auth_url = http://127.0.0.1:5000/v3 -magnum_url = http://127.0.0.1:9511/v1 -username = demo -project_name = demo -project_domain_id = default -user_domain_id = default -password = password -auth_version = v3 -insecure=False -[admin] -user = admin -project_name = admin -pass = password -project_domain_id = default -user_domain_id = default -[magnum] -image_id = fedora-atomic-latest -nic_id = public -keypair_id = default -flavor_id = s1.magnum -master_flavor_id = m1.magnum diff --git a/install-guide/source/common/configure_2_edit_magnum_conf.rst b/install-guide/source/common/configure_2_edit_magnum_conf.rst deleted file mode 100644 index a3b6544a..00000000 --- a/install-guide/source/common/configure_2_edit_magnum_conf.rst +++ /dev/null @@ -1,111 +0,0 @@ -2. Edit the ``/etc/magnum/magnum.conf`` file: - - * In the ``[api]`` section, configure the host: - - .. code-block:: ini - - [api] - ... - host = CONTROLLER_IP - - Replace ``CONTROLLER_IP`` with the IP address on which you wish magnum api - should listen. - - * In the ``[certificates]`` section, select ``barbican`` (or ``x509keypair`` if - you don't have barbican installed): - - * Use barbican to store certificates: - - .. code-block:: ini - - [certificates] - ... - cert_manager_type = barbican - - .. important:: - - Barbican is recommended for production environments. - - * To store x509 certificates in magnum's database: - - .. code-block:: ini - - [certificates] - ... - cert_manager_type = x509keypair - - * In the ``[cinder_client]`` section, configure the region name: - - .. code-block:: ini - - [cinder_client] - ... - region_name = RegionOne - - * In the ``[database]`` section, configure database access: - - .. code-block:: ini - - [database] - ... - connection = mysql+pymysql://magnum:MAGNUM_DBPASS@controller/magnum - - Replace ``MAGNUM_DBPASS`` with the password you chose for - the magnum database. - - * In the ``[keystone_authtoken]`` and ``[trust]`` sections, configure - Identity service access: - - .. code-block:: ini - - [keystone_authtoken] - ... 
-        memcached_servers = controller:11211
-        auth_version = v3
-        auth_uri = http://controller:5000/v3
-        project_domain_id = default
-        project_name = service
-        user_domain_id = default
-        password = MAGNUM_PASS
-        username = magnum
-        auth_url = http://controller:35357
-        auth_type = password
-
-        [trust]
-        ...
-        trustee_domain_name = magnum
-        trustee_domain_admin_name = magnum_domain_admin
-        trustee_domain_admin_password = DOMAIN_ADMIN_PASS
-        trustee_keystone_interface = KEYSTONE_INTERFACE
-
-     Replace ``MAGNUM_PASS`` with the password you chose for the magnum user
-     in the Identity service and ``DOMAIN_ADMIN_PASS`` with the password you
-     chose for the ``magnum_domain_admin`` user.
-
-     Replace ``KEYSTONE_INTERFACE`` with either ``public`` or ``internal``,
-     depending on your network configuration. If your instances cannot reach
-     the internal keystone endpoint, which is often the case in production
-     environments, set it to ``public``. Defaults to ``public``.
-
-   * In the ``[oslo_messaging_notifications]`` section, configure the
-     ``driver``:
-
-     .. code-block:: ini
-
-        [oslo_messaging_notifications]
-        ...
-        driver = messaging
-
-   * In the ``[DEFAULT]`` section,
-     configure ``RabbitMQ`` message queue access:
-
-     .. code-block:: ini
-
-        [DEFAULT]
-        ...
-        transport_url = rabbit://openstack:RABBIT_PASS@controller
-
-     Replace ``RABBIT_PASS`` with the password you chose for the
-     ``openstack`` account in ``RabbitMQ``.
-
-
diff --git a/install-guide/source/common/configure_3_populate_database.rst b/install-guide/source/common/configure_3_populate_database.rst
deleted file mode 100644
index dac69fd1..00000000
--- a/install-guide/source/common/configure_3_populate_database.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-3. Populate the Magnum database:
-
-   .. code-block:: console
-
-      # su -s /bin/sh -c "magnum-db-manage upgrade" magnum
diff --git a/install-guide/source/common/prerequisites.rst b/install-guide/source/common/prerequisites.rst
deleted file mode 100644
index 6ce4bd9f..00000000
--- a/install-guide/source/common/prerequisites.rst
+++ /dev/null
@@ -1,191 +0,0 @@
-Prerequisites
--------------
-
-Before you install and configure the Container Infrastructure Management
-service, you must create a database, service credentials, and API endpoints.
-
-#. To create the database, complete these steps:
-
-   * Use the database access client to connect to the database
-     server as the ``root`` user:
-
-     .. code-block:: console
-
-        $ mysql -u root -p
-
-   * Create the ``magnum`` database:
-
-     .. code-block:: console
-
-        CREATE DATABASE magnum;
-
-   * Grant proper access to the ``magnum`` database:
-
-     .. code-block:: console
-
-        GRANT ALL PRIVILEGES ON magnum.* TO 'magnum'@'localhost' \
-          IDENTIFIED BY 'MAGNUM_DBPASS';
-        GRANT ALL PRIVILEGES ON magnum.* TO 'magnum'@'%' \
-          IDENTIFIED BY 'MAGNUM_DBPASS';
-
-     Replace ``MAGNUM_DBPASS`` with a suitable password.
-
-   * Exit the database access client.
-
-#. Source the ``admin`` credentials to gain access to
-   admin-only CLI commands:
-
-   .. code-block:: console
-
-      $ . admin-openrc
-
-#. To create the service credentials, complete these steps:
-
-   * Create the ``magnum`` user:
-
-     ..
code-block:: console - - - $ openstack user create --domain default \ - --password-prompt magnum - User Password: - Repeat User Password: - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | a8ebafc275c54d389dfc1bff8b4fe286 | - | name | magnum | - +-----------+----------------------------------+ - - * Add the ``admin`` role to the ``magnum`` user: - - .. code-block:: console - - $ openstack role add --project service --user magnum admin - - .. note:: - - This command provides no output. - - * Create the ``magnum`` service entity: - - .. code-block:: console - - $ openstack service create --name magnum \ - --description "OpenStack Container Infrastructure Management Service" \ - container-infra - +-------------+-------------------------------------------------------+ - | Field | Value | - +-------------+-------------------------------------------------------+ - | description | OpenStack Container Infrastructure Management Service | - | enabled | True | - | id | 194faf83e8fd4e028e5ff75d3d8d0df2 | - | name | magnum | - | type | container-infra | - +-------------+-------------------------------------------------------+ - -#. Create the Container Infrastructure Management service API endpoints: - - .. code-block:: console - - $ openstack endpoint create --region RegionOne \ - container-infra public http://CONTROLLER_IP:9511/v1 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | cb137e6366ad495bb521cfe92d8b8858 | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 0f7f62a1f1a247d2a4cb237642814d0e | - | service_name | magnum | - | service_type | container-infra | - | url | http://CONTROLLER_IP:9511/v1 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - container-infra internal http://CONTROLLER_IP:9511/v1 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 17cbc3b6f51449a0a818118d6d62868d | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 0f7f62a1f1a247d2a4cb237642814d0e | - | service_name | magnum | - | service_type | container-infra | - | url | http://CONTROLLER_IP:9511/v1 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - container-infra admin http://CONTROLLER_IP:9511/v1 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 30f8888e6b6646d7b5cd14354c95a684 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 0f7f62a1f1a247d2a4cb237642814d0e | - | service_name | magnum | - | service_type | container-infra | - | url | http://CONTROLLER_IP:9511/v1 | - +--------------+----------------------------------+ - - Replace ``CONTROLLER_IP`` with the IP magnum listens to. Alternatively, - you can use a hostname which is reachable by the Compute instances. - -#. Magnum requires additional information in the Identity service to - manage COE clusters. To add this information, complete these steps: - - * Create the ``magnum`` domain that contains projects and users: - - .. 
code-block:: console - - $ openstack domain create --description "Owns users and projects \ - created by magnum" magnum - +-------------+-------------------------------------------+ - | Field | Value | - +-------------+-------------------------------------------+ - | description | Owns users and projects created by magnum | - | enabled | True | - | id | 66e0469de9c04eda9bc368e001676d20 | - | name | magnum | - +-------------+-------------------------------------------+ - - * Create the ``magnum_domain_admin`` user to manage projects and users - in the ``magnum`` domain: - - .. code-block:: console - - $ openstack user create --domain magnum --password-prompt \ - magnum_domain_admin - User Password: - Repeat User Password: - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | 66e0469de9c04eda9bc368e001676d20 | - | enabled | True | - | id | 529b81cf35094beb9784c6d06c090c2b | - | name | magnum_domain_admin | - +-----------+----------------------------------+ - - * Add the ``admin`` role to the ``magnum_domain_admin`` user in the - ``magnum`` domain to enable administrative management privileges - by the ``magnum_domain_admin`` user: - - .. code-block:: console - - $ openstack role add --domain magnum --user-domain magnum --user \ - magnum_domain_admin admin - - .. note:: - - This command provides no output. diff --git a/install-guide/source/conf.py b/install-guide/source/conf.py deleted file mode 100644 index ba96a51a..00000000 --- a/install-guide/source/conf.py +++ /dev/null @@ -1,301 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os - - -import openstackdocstheme - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -# TODO(ajaeger): enable PDF building, for example add 'rst2pdf.pdfbuilder' -# extensions = - -# Add any paths that contain templates here, relative to this directory. -# templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = u'Installation Guide for Container Infrastructure Management Service' -bug_tag = u'install-guide' -copyright = u'2016, OpenStack contributors' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -from magnum.version import version_info as magnum_version -version = magnum_version.canonical_version_string() -# The full version, including alpha/beta/rc tags. -release = magnum_version.version_string_with_vcs() - -# A few variables have to be set for the log-a-bug feature. -# giturl: The location of conf.py on Git. Must be set manually. -# gitsha: The SHA checksum of the bug description. Automatically extracted -# from git log. -# bug_tag: Tag for categorizing the bug. Must be set manually. -# These variables are passed to the logabug code via html_context. -giturl = u'https://git.openstack.org/cgit/openstack/magnum/tree/' -giturl += u'install-guide/source' -git_cmd = "/usr/bin/git log | head -n1 | cut -f2 -d' '" -gitsha = os.popen(git_cmd).read().strip('\n') -html_context = {"gitsha": gitsha, "bug_tag": bug_tag, - "giturl": giturl, "bug_project": "magnum"} - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["common/*.rst"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [openstackdocstheme.get_html_theme_path()] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = [] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# So that we can enable "log-a-bug" links from each output HTML page, this -# variable must be set to a format that includes year, month, day, hours and -# minutes. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -html_use_index = False - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -html_show_sourcelink = False - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'install-guide' - -# If true, publish source files -html_copy_source = False - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'InstallGuide.tex', u'Install Guide', - u'OpenStack contributors', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. 
-# latex_domain_indices = True
-
-
-# -- Options for manual page output ---------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'installguide', u'Install Guide',
-     [u'OpenStack contributors'], 1)
-]
-
-# If true, show URL addresses after external links.
-# man_show_urls = False
-
-
-# -- Options for Texinfo output -------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-#  dir menu entry, description, category)
-texinfo_documents = [
-    ('index', 'InstallGuide', u'Install Guide',
-     u'OpenStack contributors', 'InstallGuide',
-     'This guide shows OpenStack end users how to install '
-     'an OpenStack cloud.', 'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-# texinfo_appendices = []
-
-# If false, no module index is generated.
-# texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-# texinfo_show_urls = 'footnote'
-
-# If true, do not generate a @detailmenu in the "Top" node's menu.
-# texinfo_no_detailmenu = False
-
-# -- Options for Internationalization output ------------------------------
-locale_dirs = ['locale/']
-
-# -- Options for PDF output --------------------------------------------------
-
-pdf_documents = [
-    ('index', u'InstallGuide', u'Install Guide',
-     u'OpenStack contributors')
-]
diff --git a/install-guide/source/get_started.rst b/install-guide/source/get_started.rst
deleted file mode 100644
index d5964fbb..00000000
--- a/install-guide/source/get_started.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-====================================================
-Container Infrastructure Management service overview
-====================================================
-
-The Container Infrastructure Management service consists of the
-following components:
-
-``magnum`` command-line client
-  A CLI that communicates with the ``magnum-api`` to create and manage
-  container clusters. End developers can directly use the magnum
-  REST API (see the example below).
-
-``magnum-api`` service
-  An OpenStack-native REST API that processes API requests by sending
-  them to the ``magnum-conductor`` via AMQP.
-
-``magnum-conductor`` service
-  Runs on a controller machine and connects to heat to orchestrate a
-  cluster. Additionally, it connects to a Docker Swarm, Kubernetes
-  or Mesos REST API endpoint.
diff --git a/install-guide/source/index.rst b/install-guide/source/index.rst
deleted file mode 100644
index 17a15453..00000000
--- a/install-guide/source/index.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-===========================================
-Container Infrastructure Management service
-===========================================
-
-.. toctree::
-   :maxdepth: 2
-
-   get_started.rst
-   install.rst
-   verify.rst
-   launch-instance.rst
-   next-steps.rst
-
-The Container Infrastructure Management service (codenamed magnum) is an
-OpenStack API service developed by the OpenStack Containers Team that makes
-container orchestration engines (COEs) such as Docker Swarm, Kubernetes
-and Mesos available as first-class resources in OpenStack. Magnum uses
-Heat to orchestrate an OS image which contains Docker and Kubernetes and
-runs that image on either virtual machines or bare metal in a cluster
-configuration.
-
-This chapter assumes a working setup of OpenStack following the `OpenStack
-Installation Tutorial `_.
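-As noted in ``get_started.rst`` above, end developers can use the magnum
-REST API directly. A minimal sketch (the controller hostname and the
-``/v1/clusters`` path are assumptions, based on the API endpoints created in
-the prerequisites section)::
-
-   $ TOKEN=$(openstack token issue -f value -c id)
-   $ curl -s -H "X-Auth-Token: $TOKEN" http://controller:9511/v1/clusters
-
-This is the same API that the ``magnum`` command-line client calls on your
-behalf.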
- diff --git a/install-guide/source/install-debian-manual.rst b/install-guide/source/install-debian-manual.rst deleted file mode 100644 index 557c7a62..00000000 --- a/install-guide/source/install-debian-manual.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. _install-debian-manual: - -Install and configure for Debian -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Container -Infrastructure Management service for Debian. - -.. include:: common/prerequisites.rst - -Install and configure components --------------------------------- - -#. Install the common and library packages: - - .. code-block:: console - - # DEBIAN_FRONTEND=noninteractive apt-get install magnum-api magnum-conductor - -.. include:: common/configure_2_edit_magnum_conf.rst - -.. include:: common/configure_3_populate_database.rst - -Finalize installation ---------------------- - -* Restart the Container Infrastructure Management services: - - .. code-block:: console - - # service magnum-api restart - # service magnum-conductor restart diff --git a/install-guide/source/install-obs.rst b/install-guide/source/install-obs.rst deleted file mode 100644 index 69a98311..00000000 --- a/install-guide/source/install-obs.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. _install-obs: - -Install and configure for openSUSE and SUSE Linux Enterprise -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Container -Infrastructure Management service for openSUSE Leap 42.2 and SUSE Linux -Enterprise Server 12 SP2. - -.. include:: common/prerequisites.rst - -Install and configure components --------------------------------- - -#. Install the packages: - - .. code-block:: console - - # zypper install openstack-magnum-api openstack-magnum-conductor python-magnumclient - -.. include:: common/configure_2_edit_magnum_conf.rst - -.. include:: common/configure_3_populate_database.rst - -Finalize installation ---------------------- - -* Start the Container Infrastructure Management services and configure - them to start when the system boots: - - .. code-block:: console - - # systemctl enable openstack-magnum-api.service \ - openstack-magnum-conductor.service - # systemctl start openstack-magnum-api.service \ - openstack-magnum-conductor.service diff --git a/install-guide/source/install-rdo.rst b/install-guide/source/install-rdo.rst deleted file mode 100644 index 95bfe717..00000000 --- a/install-guide/source/install-rdo.rst +++ /dev/null @@ -1,45 +0,0 @@ -.. _install-rdo: - -Install and configure for Red Hat Enterprise Linux and CentOS -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Container -Infrastructure Management service for Red Hat Enterprise Linux 7 and CentOS 7. - -.. include:: common/prerequisites.rst - -Install and configure components --------------------------------- - -#. Install the packages: - - .. code-block:: console - - # yum install openstack-magnum-api openstack-magnum-conductor python-magnumclient - -.. include:: common/configure_2_edit_magnum_conf.rst - -* Additionally, edit the ``/etc/magnum/magnum.conf`` file: - - * In the ``[oslo_concurrency]`` section, configure the ``lock_path``: - - .. code-block:: ini - - [oslo_concurrency] - ... - lock_path = /var/lib/magnum/tmp - -.. 
include:: common/configure_3_populate_database.rst - -Finalize installation ---------------------- - -* Start the Container Infrastructure Management services and configure - them to start when the system boots: - - .. code-block:: console - - # systemctl enable openstack-magnum-api.service \ - openstack-magnum-conductor.service - # systemctl start openstack-magnum-api.service \ - openstack-magnum-conductor.service diff --git a/install-guide/source/install-ubuntu.rst b/install-guide/source/install-ubuntu.rst deleted file mode 100644 index ed09de5b..00000000 --- a/install-guide/source/install-ubuntu.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. _install-ubuntu: - -Install and configure for Ubuntu -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Container -Infrastructure Management service for Ubuntu 14.04 (LTS). - -.. include:: common/prerequisites.rst - -Install and configure components --------------------------------- - -#. Install the common and library packages: - - .. code-block:: console - - # DEBIAN_FRONTEND=noninteractive apt-get install magnum-api magnum-conductor python-magnumclient - -.. include:: common/configure_2_edit_magnum_conf.rst - -.. include:: common/configure_3_populate_database.rst - -Finalize installation ---------------------- - -* Restart the Container Infrastructure Management services: - - .. code-block:: console - - # service magnum-api restart - # service magnum-conductor restart diff --git a/install-guide/source/install.rst b/install-guide/source/install.rst deleted file mode 100644 index 60050991..00000000 --- a/install-guide/source/install.rst +++ /dev/null @@ -1,52 +0,0 @@ -.. _install: - -Install and configure -~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Container -Infrastructure Management service, code-named magnum, on the controller node. - -This section assumes that you already have a working OpenStack environment with -at least the following components installed: Identity service, Image service, -Compute service, Networking service, Block Storage service and Orchestration -service. See `OpenStack Install Guides `__. - -To provide access to Docker Swarm or Kubernetes using the native clients -(docker or kubectl, respectively) magnum uses TLS certificates. To store the -certificates, it is recommended to use the `Key Manager service, code-named -barbican `__, or you can save them in magnum's database. - -Optionally, you can install the following components: - -- `Load Balancer as a Service (LBaaS v2) `__ to create clusters with multiple - masters -- `Bare Metal service `__ to create baremetal clusters -- `Object Storage service `__ to make private Docker registries available to - users -- `Telemetry Data Collection service `__ to periodically send - magnum-related metrics - -.. note:: - - Installation and configuration vary by distribution. - -.. important:: - - Magnum creates clusters of compute instances on the Compute service (nova). - These instances must have basic Internet connectivity and must be able to - reach magnum's API server. Make sure that the Compute and Network services - are configured accordingly. - -.. toctree:: - :maxdepth: 2 - - install-debian-manual.rst - install-obs.rst - install-rdo.rst - install-ubuntu.rst diff --git a/install-guide/source/launch-instance.rst b/install-guide/source/launch-instance.rst deleted file mode 100644 index 18f3cfe1..00000000 --- a/install-guide/source/launch-instance.rst +++ /dev/null @@ -1,453 +0,0 @@ -.. 
_launch-instance:
-
-Launch an instance
-~~~~~~~~~~~~~~~~~~
-
-In environments that include the Container Infrastructure Management service,
-you can provision container clusters made up of virtual machines or baremetal
-servers. The Container Infrastructure Management service uses `Cluster Templates
-`__
-to describe how a `Cluster `__ is constructed. In each of the following examples
-you will create a Cluster Template for a specific COE and then you will
-provision a Cluster using the corresponding Cluster Template. Then, you can use
-the appropriate COE client or endpoint to create containers.
-
-Create an external network (Optional)
--------------------------------------
-
-To create a magnum cluster, you need an external network. If there are no
-external networks, create one.
-
-#. Create an external network, choosing a provider network type that your
-   cloud supports:
-
-   .. code-block:: console
-
-      $ openstack network create public --provider-network-type vxlan \
-                                        --external \
-                                        --project service
-      +---------------------------+--------------------------------------+
-      | Field                     | Value                                |
-      +---------------------------+--------------------------------------+
-      | admin_state_up            | UP                                   |
-      | availability_zone_hints   |                                      |
-      | availability_zones        |                                      |
-      | created_at                | 2017-03-27T10:09:04Z                 |
-      | description               |                                      |
-      | dns_domain                | None                                 |
-      | id                        | 372170ca-7d2e-48a2-8449-670e4ab66c23 |
-      | ipv4_address_scope        | None                                 |
-      | ipv6_address_scope        | None                                 |
-      | is_default                | False                                |
-      | mtu                       | 1450                                 |
-      | name                      | public                               |
-      | port_security_enabled     | True                                 |
-      | project_id                | 224c32c0dd2e49cbaadfd1cda069f149     |
-      | provider:network_type     | vxlan                                |
-      | provider:physical_network | None                                 |
-      | provider:segmentation_id  | 3                                    |
-      | qos_policy_id             | None                                 |
-      | revision_number           | 4                                    |
-      | router:external           | External                             |
-      | segments                  | None                                 |
-      | shared                    | False                                |
-      | status                    | ACTIVE                               |
-      | subnets                   |                                      |
-      | updated_at                | 2017-03-27T10:09:04Z                 |
-      +---------------------------+--------------------------------------+
-      $ openstack subnet create public-subnet --network public \
-                                              --subnet-range 192.168.1.0/24 \
-                                              --gateway 192.168.1.1 \
-                                              --ip-version 4
-      +-------------------+--------------------------------------+
-      | Field             | Value                                |
-      +-------------------+--------------------------------------+
-      | allocation_pools  | 192.168.1.2-192.168.1.254            |
-      | cidr              | 192.168.1.0/24                       |
-      | created_at        | 2017-03-27T10:46:15Z                 |
-      | description       |                                      |
-      | dns_nameservers   |                                      |
-      | enable_dhcp       | True                                 |
-      | gateway_ip        | 192.168.1.1                          |
-      | host_routes       |                                      |
-      | id                | 04185f6c-ea31-4109-b20b-fd7f935b3828 |
-      | ip_version        | 4                                    |
-      | ipv6_address_mode | None                                 |
-      | ipv6_ra_mode      | None                                 |
-      | name              | public-subnet                        |
-      | network_id        | 372170ca-7d2e-48a2-8449-670e4ab66c23 |
-      | project_id        | d9e40a0aff30441083d9f279a0ff50de     |
-      | revision_number   | 2                                    |
-      | segment_id        | None                                 |
-      | service_types     |                                      |
-      | subnetpool_id     | None                                 |
-      | updated_at        | 2017-03-27T10:46:15Z                 |
-      +-------------------+--------------------------------------+
-
-Create a keypair (Optional)
----------------------------
-
-To create a magnum cluster, you need a keypair which will be passed
-to all compute instances of the cluster. If you don't have a keypair
-in your project, create one.
-
-#. Create a keypair on the Compute service:
-
-   .. code-block:: console
-
-      $ openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
-      +-------------+-------------------------------------------------+
-      | Field       | Value                                           |
-      +-------------+-------------------------------------------------+
-      | fingerprint | 05:be:32:07:58:a7:e8:0b:05:9b:81:6d:80:9a:4e:b1 |
-      | name        | mykey                                           |
-      | user_id     | 2d4398dbd5274707bf100a9dbbe85819                |
-      +-------------+-------------------------------------------------+
-
-Upload the images required for your clusters to the Image service
-------------------------------------------------------------------
-
-The VM versions of the Kubernetes and Docker Swarm drivers require a Fedora
-Atomic image. The following is a stock Fedora Atomic image, built by the
-Atomic team and tested by the Magnum team.
-
-#. Download the image:
-
-   .. code-block:: console
-
-      $ wget https://fedorapeople.org/groups/magnum/fedora-atomic-latest.qcow2
-
-#. Register the image with the Image service, setting the ``os_distro``
-   property to ``fedora-atomic``:
-
-   .. code-block:: console
-
-      $ openstack image create \
-                        --disk-format=qcow2 \
-                        --container-format=bare \
-                        --file=fedora-atomic-latest.qcow2 \
-                        --property os_distro='fedora-atomic' \
-                        fedora-atomic-latest
-      +------------------+------------------------------------------------------+
-      | Field            | Value                                                |
-      +------------------+------------------------------------------------------+
-      | checksum         | a987b691e23dce54c03d7a57c104b195                     |
-      | container_format | bare                                                 |
-      | created_at       | 2016-09-14T12:58:01Z                                 |
-      | disk_format      | qcow2                                                |
-      | file             | /v2/images/81b25935-3400-441a-9f2e-f984a46c89dd/file |
-      | id               | 81b25935-3400-441a-9f2e-f984a46c89dd                 |
-      | min_disk         | 0                                                    |
-      | min_ram          | 0                                                    |
-      | name             | fedora-atomic-latest                                 |
-      | owner            | c4b42942156741dfbc4775dbcb032841                     |
-      | properties       | os_distro='fedora-atomic'                            |
-      | protected        | False                                                |
-      | schema           | /v2/schemas/image                                    |
-      | size             | 507928064                                            |
-      | status           | active                                               |
-      | tags             |                                                      |
-      | updated_at       | 2016-09-14T12:58:03Z                                 |
-      | virtual_size     | None                                                 |
-      | visibility       | private                                              |
-      +------------------+------------------------------------------------------+
-
-Provision a Docker Swarm cluster and create a container
--------------------------------------------------------
-
-Following this example, you will provision a Docker Swarm cluster with one
-master and one node. Then, using docker's native API, you will create a
-container.
-
-
-#. Create a cluster template for a Docker Swarm cluster using the
-   ``fedora-atomic-latest`` image, ``m1.small`` as the flavor for the master
-   and the node, ``public`` as the external network and ``8.8.8.8`` for the
-   DNS nameserver, using the following command:
-
-   ..
code-block:: console - - $ magnum cluster-template-create swarm-cluster-template \ - --image fedora-atomic-latest \ - --external-network public \ - --dns-nameserver 8.8.8.8 \ - --master-flavor m1.small \ - --flavor m1.small \ - --coe swarm - +-----------------------+--------------------------------------+ - | Property | Value | - +-----------------------+--------------------------------------+ - | insecure_registry | - | - | labels | {} | - | updated_at | - | - | floating_ip_enabled | True | - | fixed_subnet | - | - | master_flavor_id | m1.small | - | uuid | 47c6ce77-50ae-43bd-8e2a-06980392693d | - | no_proxy | - | - | https_proxy | - | - | tls_disabled | False | - | keypair_id | mykey | - | public | False | - | http_proxy | - | - | docker_volume_size | - | - | server_type | vm | - | external_network_id | public | - | cluster_distro | fedora-atomic | - | image_id | fedora-atomic-latest | - | volume_driver | - | - | registry_enabled | False | - | docker_storage_driver | devicemapper | - | apiserver_port | - | - | name | swarm-cluster-template | - | created_at | 2016-09-14T13:05:11+00:00 | - | network_driver | docker | - | fixed_network | - | - | coe | swarm | - | flavor_id | m1.small | - | master_lb_enabled | False | - | dns_nameserver | 8.8.8.8 | - +-----------------------+--------------------------------------+ - -#. Create a cluster with one node and one master using ``mykey`` as the - keypair, using the following command: - - .. code-block:: console - - $ magnum cluster-create swarm-cluster \ - --cluster-template swarm-cluster-template \ - --master-count 1 \ - --node-count 1 \ - --keypair mykey - Request to create cluster 2582f192-480e-4329-ac05-32a8e5b1166b has been accepted. - - Your cluster is now being created. Creation time depends on your - infrastructure's performance. You can check the status of your cluster - using the commands: ``magnum cluster-list`` or - ``magnum cluster-show swarm-cluster``. - - .. code-block:: console - - $ magnum cluster-list - +--------------------------------------+---------------+---------+------------+--------------+-----------------+ - | uuid | name | keypair | node_count | master_count | status | - +--------------------------------------+---------------+---------+------------+--------------+-----------------+ - | 2582f192-480e-4329-ac05-32a8e5b1166b | swarm-cluster | mykey | 1 | 1 | CREATE_COMPLETE | - +--------------------------------------+---------------+---------+------------+--------------+-----------------+ - - .. 
code-block:: console - - $ magnum cluster-show swarm-cluster - +---------------------+------------------------------------------------------------+ - | Property | Value | - +---------------------+------------------------------------------------------------+ - | status | CREATE_COMPLETE | - | cluster_template_id | 47c6ce77-50ae-43bd-8e2a-06980392693d | - | uuid | 2582f192-480e-4329-ac05-32a8e5b1166b | - | stack_id | 3d7bbf1c-49bd-4930-84e0-ab71ba200687 | - | status_reason | Stack CREATE completed successfully | - | created_at | 2016-09-14T13:36:54+00:00 | - | name | swarm-cluster | - | updated_at | 2016-09-14T13:38:08+00:00 | - | discovery_url | https://discovery.etcd.io/a5ece414689287eca62e35555512bfd5 | - | api_address | tcp://172.24.4.10:2376 | - | coe_version | 1.2.5 | - | master_addresses | ['172.24.4.10'] | - | create_timeout | 60 | - | node_addresses | ['172.24.4.8'] | - | master_count | 1 | - | container_version | 1.12.6 | - | node_count | 1 | - +---------------------+------------------------------------------------------------+ - -#. Add the credentials of the above cluster to your environment: - - .. code-block:: console - - $ mkdir myclusterconfig - $ $(magnum cluster-config swarm-cluster --dir myclusterconfig) - - - The above command will save the authentication artifacts in the - `myclusterconfig` directory and it will export the environment - variables: DOCKER_HOST, DOCKER_CERT_PATH and DOCKER_TLS_VERIFY. - Sample output: - - .. code-block:: console - - export DOCKER_HOST=tcp://172.24.4.10:2376 - export DOCKER_CERT_PATH=myclusterconfig - export DOCKER_TLS_VERIFY=True - -#. Create a container: - - .. code-block:: console - - $ docker run busybox echo "Hello from Docker!" - Hello from Docker! - -#. Delete the cluster: - - .. code-block:: console - - $ magnum cluster-delete swarm-cluster - Request to delete cluster swarm-cluster has been accepted. - -Provision a Kubernetes cluster and create a deployment ------------------------------------------------------- - -Following this example, you will provision a Kubernetes cluster with one -master and one node. Then, using Kubernetes's native client ``kubectl``, you -will create a deployment. - -#. Create a cluster template for a Kubernetes cluster using the - ``fedora-atomic-latest`` image, ``m1.small`` as the flavor for the master - and the node, ``public`` as the external network and ``8.8.8.8`` for the - DNS nameserver, using the following command: - - .. 
code-block:: console - - $ magnum cluster-template-create kubernetes-cluster-template \ - --image fedora-atomic-latest \ - --external-network public \ - --dns-nameserver 8.8.8.8 \ - --master-flavor m1.small \ - --flavor m1.small \ - --coe kubernetes - +-----------------------+--------------------------------------+ - | Property | Value | - +-----------------------+--------------------------------------+ - | insecure_registry | - | - | labels | {} | - | updated_at | - | - | floating_ip_enabled | True | - | fixed_subnet | - | - | master_flavor_id | m1.small | - | uuid | 0a601cc4-8fef-41aa-8036-d113e719ed7a | - | no_proxy | - | - | https_proxy | - | - | tls_disabled | False | - | keypair_id | - | - | public | False | - | http_proxy | - | - | docker_volume_size | - | - | server_type | vm | - | external_network_id | public | - | cluster_distro | fedora-atomic | - | image_id | fedora-atomic-latest | - | volume_driver | - | - | registry_enabled | False | - | docker_storage_driver | devicemapper | - | apiserver_port | - | - | name | kubernetes-cluster-template | - | created_at | 2017-05-16T09:53:00+00:00 | - | network_driver | flannel | - | fixed_network | - | - | coe | kubernetes | - | flavor_id | m1.small | - | master_lb_enabled | False | - | dns_nameserver | 8.8.8.8 | - +-----------------------+--------------------------------------+ - -#. Create a cluster with one node and one master using ``mykey`` as the - keypair, using the following command: - - .. code-block:: console - - $ magnum cluster-create kubernetes-cluster \ - --cluster-template kubernetes-cluster-template \ - --master-count 1 \ - --node-count 1 \ - --keypair mykey - Request to create cluster b1ef3528-ac03-4459-bbf7-22649bfbc84f has been accepted. - - Your cluster is now being created. Creation time depends on your - infrastructure's performance. You can check the status of your cluster - using the commands: ``magnum cluster-list`` or - ``magnum cluster-show kubernetes-cluster``. - - .. code-block:: console - - $ magnum cluster-list - +--------------------------------------+--------------------+---------+------------+--------------+-----------------+ - | uuid | name | keypair | node_count | master_count | status | - +--------------------------------------+--------------------+---------+------------+--------------+-----------------+ - | b1ef3528-ac03-4459-bbf7-22649bfbc84f | kubernetes-cluster | mykey | 1 | 1 | CREATE_COMPLETE | - +--------------------------------------+--------------------+---------+------------+--------------+-----------------+ - - .. 
code-block:: console - - $ magnum cluster-show kubernetes-cluster - +---------------------+------------------------------------------------------------+ - | Property | Value | - +---------------------+------------------------------------------------------------+ - | status | CREATE_COMPLETE | - | cluster_template_id | 0a601cc4-8fef-41aa-8036-d113e719ed7a | - | node_addresses | ['172.24.4.5'] | - | uuid | b1ef3528-ac03-4459-bbf7-22649bfbc84f | - | stack_id | 8296624c-3c0e-45e1-967e-b6ff05105a3b | - | status_reason | Stack CREATE completed successfully | - | created_at | 2017-05-16T09:58:02+00:00 | - | updated_at | 2017-05-16T10:00:02+00:00 | - | coe_version | v1.5.3 | - | keypair | default | - | api_address | https://172.24.4.13:6443 | - | master_addresses | ['172.24.4.13'] | - | create_timeout | 60 | - | node_count | 1 | - | discovery_url | https://discovery.etcd.io/69c7cd3b3b06c98b4771410bd166a7c6 | - | master_count | 1 | - | container_version | 1.12.6 | - | name | kubernetes-cluster | - +---------------------+------------------------------------------------------------+ - -#. Add the credentials of the above cluster to your environment: - - .. code-block:: console - - $ mkdir -p ~/clusters/kubernetes-cluster - $ $(magnum cluster-config kubernetes-cluster --dir ~/clusters/kubernetes-cluster) - - - The above command will save the authentication artifacts in the directory - ``~/clusters/kubernetes-cluster`` and it will export the ``KUBECONFIG`` - environment variable: - - .. code-block:: console - - export KUBECONFIG=/home/user/clusters/kubernetes-cluster/config - -#. You can list the controller components of your Kubernetes cluster and - check if they are ``Running``: - - .. code-block:: console - - $ kubectl -n kube-system get po - NAME READY STATUS RESTARTS AGE - kube-controller-manager-ku-hesuip7l3i-0-5mqijvszepxw-kube-master-rqwmwne7rjh2 1/1 Running 0 1h - kube-proxy-ku-hesuip7l3i-0-5mqijvszepxw-kube-master-rqwmwne7rjh2 1/1 Running 0 1h - kube-proxy-ku-wmmticfvdr-0-k53p22xmlxvx-kube-minion-x4ly6zfhrrui 1/1 Running 0 1h - kube-scheduler-ku-hesuip7l3i-0-5mqijvszepxw-kube-master-rqwmwne7rjh2 1/1 Running 0 1h - kubernetes-dashboard-3203831700-zvj2d 1/1 Running 0 1h - -#. Now, you can create a nginx deployment and verify it is running: - - .. code-block:: console - - $ kubectl run nginx --image=nginx --replicas=5 - deployment "nginx" created - $ kubectl get po - NAME READY STATUS RESTARTS AGE - nginx-701339712-2ngt8 1/1 Running 0 15s - nginx-701339712-j8r3d 1/1 Running 0 15s - nginx-701339712-mb6jb 1/1 Running 0 15s - nginx-701339712-q115k 1/1 Running 0 15s - nginx-701339712-tb5lp 1/1 Running 0 15s - -#. Delete the cluster: - - .. code-block:: console - - $ magnum cluster-delete kubernetes-cluster - Request to delete cluster kubernetes-cluster has been accepted. diff --git a/install-guide/source/next-steps.rst b/install-guide/source/next-steps.rst deleted file mode 100644 index 6583c0f9..00000000 --- a/install-guide/source/next-steps.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _next-steps: - -Next steps -~~~~~~~~~~ - -Your OpenStack environment now includes the magnum service. - -To add more services, see the `additional documentation on installing OpenStack -`_ . diff --git a/install-guide/source/verify.rst b/install-guide/source/verify.rst deleted file mode 100644 index e39a6c73..00000000 --- a/install-guide/source/verify.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. _verify: - -Verify operation -~~~~~~~~~~~~~~~~ - -Verify operation of the Container Infrastructure Management service. - -.. 
note:: - - Perform these commands on the controller node. - -#. Source the ``admin`` tenant credentials: - - .. code-block:: console - - $ . admin-openrc - -#. To list the health of magnum's internal services (namely the - conductor), use: - - .. code-block:: console - - $ magnum service-list - +----+-----------------------+------------------+-------+ - | id | host | binary | state | - +----+-----------------------+------------------+-------+ - | 1 | controller | magnum-conductor | up | - +----+-----------------------+------------------+-------+ - - .. note:: - - This output should indicate a ``magnum-conductor`` component - on the controller node. diff --git a/magnum/__init__.py b/magnum/__init__.py deleted file mode 100644 index 222507a6..00000000 --- a/magnum/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import threading - -import pbr.version - - -__version__ = pbr.version.VersionInfo( - 'magnum').version_string() - -# Make a project global TLS trace storage repository -TLS = threading.local() diff --git a/magnum/api/__init__.py b/magnum/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/api/app.py b/magnum/api/app.py deleted file mode 100755 index cae197aa..00000000 --- a/magnum/api/app.py +++ /dev/null @@ -1,67 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
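``magnum/__init__.py`` above declares a project-global thread-local (``TLS``) for trace storage. A minimal hedged sketch of how such a store behaves (the ``trace_id`` attribute is illustrative, not part of the original tree):

.. code-block:: python

    import threading

    import magnum

    # Attributes set on a threading.local() are visible only to the thread
    # that set them, so request-scoped data needs no explicit plumbing.
    magnum.TLS.trace_id = 'req-0001'

    def worker():
        # A new thread starts with an empty thread-local namespace.
        print(getattr(magnum.TLS, 'trace_id', None))  # prints: None

    t = threading.Thread(target=worker)
    t.start()
    t.join()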
-import os - -from oslo_config import cfg -from oslo_log import log -from paste import deploy -import pecan - -from magnum.api import config as api_config -from magnum.api import middleware -from magnum.common import config as common_config -import magnum.conf - -CONF = magnum.conf.CONF - -LOG = log.getLogger(__name__) - - -def get_pecan_config(): - # Set up the pecan configuration - filename = api_config.__file__.replace('.pyc', '.py') - return pecan.configuration.conf_from_file(filename) - - -def setup_app(config=None): - if not config: - config = get_pecan_config() - - app_conf = dict(config.app) - common_config.set_config_defaults() - - app = pecan.make_app( - app_conf.pop('root'), - logging=getattr(config, 'logging', {}), - wrap_app=middleware.ParsableErrorMiddleware, - **app_conf - ) - - return app - - -def load_app(): - cfg_file = None - cfg_path = CONF.api.api_paste_config - if not os.path.isabs(cfg_path): - cfg_file = CONF.find_file(cfg_path) - elif os.path.exists(cfg_path): - cfg_file = cfg_path - - if not cfg_file: - raise cfg.ConfigFilesNotFoundError([CONF.api.api_paste_config]) - LOG.info("Full WSGI config used: %s", cfg_file) - return deploy.loadapp("config:" + cfg_file) - - -def app_factory(global_config, **local_conf): - return setup_app() diff --git a/magnum/api/app.wsgi b/magnum/api/app.wsgi deleted file mode 100644 index 62cd8616..00000000 --- a/magnum/api/app.wsgi +++ /dev/null @@ -1,25 +0,0 @@ -# -*- mode: python -*- -# -# Copyright 2017 SUSE Linux GmbH -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import sys - -from magnum.api import app as api_app -from magnum.common import service - -service.prepare_service(sys.argv) - -application = api_app.load_app() diff --git a/magnum/api/attr_validator.py b/magnum/api/attr_validator.py deleted file mode 100644 index ade2a19a..00000000 --- a/magnum/api/attr_validator.py +++ /dev/null @@ -1,221 +0,0 @@ -# Copyright 2015 EasyStack, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
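``app.wsgi`` above is the intended deployment entry point; what follows is a hedged sketch of exercising the same ``load_app()`` pipeline under the stdlib WSGI server for local testing (the host, port, and empty argument list are illustrative, and a production deployment would sit behind a real WSGI host instead):

.. code-block:: python

    from wsgiref.simple_server import make_server

    from magnum.api import app as api_app
    from magnum.common import service

    # Parse configuration and set up logging, as app.wsgi does.
    service.prepare_service([])

    # Build the paste-deploy pipeline; this raises ConfigFilesNotFoundError
    # if the api-paste configuration cannot be located.
    application = api_app.load_app()

    make_server('127.0.0.1', 9511, application).serve_forever()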
- -from glanceclient import exc as glance_exception -from novaclient import exceptions as nova_exception -from oslo_serialization import jsonutils as json - -from magnum.api import utils as api_utils -from magnum.common import clients -from magnum.common import exception -from magnum.i18n import _ - - -SUPPORTED_ISOLATION = ['filesystem/posix', 'filesystem/linux', - 'filesystem/shared', 'posix/cpu', - 'posix/mem', 'posix/disk', 'cgroups/cpu', - 'cgroups/mem', 'docker/runtime', - 'namespaces/pid'] -SUPPORTED_IMAGE_PROVIDERS = ['docker', 'appc'] -SUPPORTED_SWARM_STRATEGY = ['spread', 'binpack', 'random'] - - -def validate_image(cli, image): - """Validate image""" - - try: - image_found = api_utils.get_openstack_resource(cli.glance().images, - image, 'images') - except (glance_exception.NotFound, exception.ResourceNotFound): - raise exception.ImageNotFound(image_id=image) - except glance_exception.HTTPForbidden: - raise exception.ImageNotAuthorized(image_id=image) - if not image_found.get('os_distro'): - raise exception.OSDistroFieldNotFound(image_id=image) - return image_found - - -def validate_flavor(cli, flavor): - """Validate flavor. - - If flavor is None, skip the validation and use the default value - from the heat template. - """ - - if flavor is None: - return - flavor_list = cli.nova().flavors.list() - for f in flavor_list: - if f.name == flavor or f.id == flavor: - return - raise exception.FlavorNotFound(flavor=flavor) - - -def validate_keypair(cli, keypair): - """Validate keypair""" - - try: - cli.nova().keypairs.get(keypair) - except nova_exception.NotFound: - raise exception.KeyPairNotFound(keypair=keypair) - - -def validate_external_network(cli, external_network): - """Validate external network""" - - count = 0 - ext_filter = {'router:external': True} - networks = cli.neutron().list_networks(**ext_filter) - for net in networks.get('networks'): - if (net.get('name') == external_network or - net.get('id') == external_network): - count = count + 1 - - if count == 0: - # Unable to find the external network. - # Or the network is private. - raise exception.ExternalNetworkNotFound(network=external_network) - - if count > 1: - msg = _("Multiple external networks exist with same name '%s'. " - "Please use the external network ID instead.") - raise exception.Conflict(msg % external_network) - - -def validate_fixed_network(cli, fixed_network): - """Validate fixed network""" - - # TODO(houming): this method's implementation will be added after this - # first patch for Cluster's OpenStack resources validation is merged.
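    # (Hedged sketch, not part of the original change: the eventual check
    # would presumably mirror validate_external_network, e.g.
    #
    #     networks = cli.neutron().list_networks()
    #     if not any(net.get('name') == fixed_network or
    #                net.get('id') == fixed_network
    #                for net in networks.get('networks', [])):
    #         raise exception.FixedNetworkNotFound(network=fixed_network)
    #
    # where FixedNetworkNotFound is a hypothetical exception type.)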
- pass - - -def validate_labels(labels): - """Validate labels""" - - for attr, validate_method in labels_validators.items(): - if labels.get(attr) is not None: - validate_method(labels) - - -def validate_labels_isolation(labels): - """Validate mesos_slave_isolation""" - mesos_slave_isolation = labels.get('mesos_slave_isolation') - mesos_slave_isolation_list = mesos_slave_isolation.split(',') - unsupported_isolations = set(mesos_slave_isolation_list) - set( - SUPPORTED_ISOLATION) - if (len(unsupported_isolations) > 0): - raise exception.InvalidParameterValue(_( - 'property "labels/mesos_slave_isolation" with value ' - '"%(isolation_val)s" is not supported, supported values are: ' - '%(supported_isolation)s') % { - 'isolation_val': ', '.join(list(unsupported_isolations)), - 'supported_isolation': ', '.join( - SUPPORTED_ISOLATION + ['unspecified'])}) - - -def validate_labels_image_providers(labels): - """Validate mesos_slave_image_providers""" - mesos_slave_image_providers = labels.get('mesos_slave_image_providers') - mesos_slave_image_providers_list = mesos_slave_image_providers.split(',') - isolation_with_valid_data = False - for image_providers_val in mesos_slave_image_providers_list: - image_providers_val = image_providers_val.lower() - if image_providers_val not in SUPPORTED_IMAGE_PROVIDERS: - raise exception.InvalidParameterValue(_( - 'property "labels/mesos_slave_image_providers" with value ' - '"%(image_providers)s" is not supported, supported values ' - 'are: %(supported_image_providers)s') % { - 'image_providers': image_providers_val, - 'supported_image_providers': ', '.join( - SUPPORTED_IMAGE_PROVIDERS + ['unspecified'])}) - - if image_providers_val == 'docker': - mesos_slave_isolation = labels.get('mesos_slave_isolation') - if mesos_slave_isolation is not None: - mesos_slave_isolation_list = mesos_slave_isolation.split(',') - for isolations_val in mesos_slave_isolation_list: - if isolations_val == 'docker/runtime': - isolation_with_valid_data = True - if mesos_slave_isolation is None or not isolation_with_valid_data: - raise exception.RequiredParameterNotProvided(_( - "Docker runtime isolator has to be specified if 'docker' " - "is included in 'mesos_slave_image_providers'. Please add " - "'docker/runtime' to the 'mesos_slave_isolation' label")) - - -def validate_labels_executor_env_variables(labels): - """Validate executor_environment_variables""" - mesos_slave_executor_env_val = labels.get( - 'mesos_slave_executor_env_variables') - try: - json.loads(mesos_slave_executor_env_val) - except ValueError: - err = _("JSON format error") - raise exception.InvalidParameterValue(err) - - -def validate_labels_strategy(labels): - """Validate swarm_strategy""" - swarm_strategy = list(labels.get('swarm_strategy', "").split()) - unsupported_strategy = set(swarm_strategy) - set( - SUPPORTED_SWARM_STRATEGY) - if (len(unsupported_strategy) > 0): - raise exception.InvalidParameterValue(_( - 'property "labels/swarm_strategy" with value ' - '"%(strategy)s" is not supported, supported values are: ' - '%(supported_strategies)s') % { - 'strategy': ' '.join(list(unsupported_strategy)), - 'supported_strategies': ', '.join( - SUPPORTED_SWARM_STRATEGY + ['unspecified'])}) - - -def validate_os_resources(context, cluster_template, cluster=None): - """Validate ClusterTemplate's OpenStack Resources""" - - cli = clients.OpenStackClients(context) - - for attr, validate_method in validators.items(): - if attr in cluster_template and cluster_template[attr] is not None: - if attr != 'labels': -
validate_method(cli, cluster_template[attr]) - else: - validate_method(cluster_template[attr]) - - if cluster: - validate_keypair(cli, cluster['keypair']) - - -def validate_master_count(cluster, cluster_template): - if cluster['master_count'] > 1 and \ - not cluster_template['master_lb_enabled']: - raise exception.InvalidParameterValue(_( - "master_count must be 1 when master_lb_enabled is False")) - - -# Dictionary that maintains a list of validation functions -validators = {'image_id': validate_image, - 'flavor_id': validate_flavor, - 'master_flavor_id': validate_flavor, - 'external_network_id': validate_external_network, - 'fixed_network': validate_fixed_network, - 'labels': validate_labels} - -labels_validators = {'mesos_slave_isolation': validate_labels_isolation, - 'mesos_slave_image_providers': - validate_labels_image_providers, - 'mesos_slave_executor_env_variables': - validate_labels_executor_env_variables, - 'swarm_strategy': validate_labels_strategy} diff --git a/magnum/api/config.py b/magnum/api/config.py deleted file mode 100644 index bb1ef88a..00000000 --- a/magnum/api/config.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2013 - Noorul Islam K M -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.api import hooks - -# Pecan Application Configurations -app = { - 'root': 'magnum.api.controllers.root.RootController', - 'modules': ['magnum.api'], - 'debug': False, - 'hooks': [ - hooks.ContextHook(), - hooks.RPCHook(), - hooks.NoExceptionTracebackHook(), - ], - 'acl_public_routes': [ - '/', - '/v1', - ], -} - -# Custom Configurations must be in Python dictionary format:: -# -# foo = {'bar':'baz'} -# -# All configurations are accessible at:: -# pecan.conf diff --git a/magnum/api/controllers/__init__.py b/magnum/api/controllers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/api/controllers/base.py b/magnum/api/controllers/base.py deleted file mode 100644 index 48a13b63..00000000 --- a/magnum/api/controllers/base.py +++ /dev/null @@ -1,229 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
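``validate_os_resources`` above drives every check through the ``validators`` mapping. A hedged sketch of that dispatch in isolation (``cli`` stands in for an ``OpenStackClients`` instance, and the template dict is illustrative):

.. code-block:: python

    # Minimal re-statement of the dispatch loop: attributes mapped to a
    # checker run only when present and non-None; 'labels' checkers take
    # the labels dict alone, every other checker also needs a client.
    cluster_template = {
        'flavor_id': 'm1.small',
        'labels': {'swarm_strategy': 'spread'},
    }

    for attr, validate_method in validators.items():
        if cluster_template.get(attr) is not None:
            if attr != 'labels':
                validate_method(cli, cluster_template[attr])
            else:
                validate_method(cluster_template[attr])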
- -import datetime -import operator -import six - -from magnum.api.controllers import versions -from magnum.api import versioned_method -from magnum.common import exception -from magnum.i18n import _ -from pecan import rest -from webob import exc -import wsme -from wsme import types as wtypes - - -# name of attribute to keep version method information -VER_METHOD_ATTR = 'versioned_methods' - - -class APIBase(wtypes.Base): - - created_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is created""" - - updated_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is updated""" - - def as_dict(self): - """Render this object as a dict of its fields.""" - return {k: getattr(self, k) - for k in self.fields - if hasattr(self, k) and - getattr(self, k) != wsme.Unset} - - def unset_fields_except(self, except_list=None): - """Unset fields so they don't appear in the message body. - - :param except_list: A list of fields that won't be touched. - - """ - if except_list is None: - except_list = [] - - for k in self.as_dict(): - if k not in except_list: - setattr(self, k, wsme.Unset) - - -class ControllerMetaclass(type): - """Controller metaclass. - - This metaclass automates the task of assembling a dictionary - mapping action keys to method names. - """ - - def __new__(mcs, name, bases, cls_dict): - """Adds version function dictionary to the class.""" - - versioned_methods = None - - for base in bases: - if base.__name__ == "Controller": - # NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute - # between API controller class creations. This allows us - # to use a class decorator on the API methods that doesn't - # require naming explicitly what method is being versioned as - # it can be implicit based on the method decorated. It is a bit - # ugly. - if VER_METHOD_ATTR in base.__dict__: - versioned_methods = getattr(base, VER_METHOD_ATTR) - delattr(base, VER_METHOD_ATTR) - - if versioned_methods: - cls_dict[VER_METHOD_ATTR] = versioned_methods - - return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, - cls_dict) - - -@six.add_metaclass(ControllerMetaclass) -class Controller(rest.RestController): - """Base Rest Controller""" - - def __getattribute__(self, key): - - def version_select(): - """Select the correct method based on version - - @return: Returns the correct versioned method - @raises: HTTPNotAcceptable if there is no method which - matches the name and version constraints - """ - - from pecan import request - ver = request.version - - func_list = self.versioned_methods[key] - for func in func_list: - if ver.matches(func.start_version, func.end_version): - return func.func - - raise exc.HTTPNotAcceptable(_( - "Version %(ver)s was requested but the requested API %(api)s " - "is not supported for this version.") % {'ver': ver, - 'api': key}) - - try: - version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR) - except AttributeError: - # No versioning on this class - return object.__getattribute__(self, key) - if version_meth_dict and key in version_meth_dict: - return version_select().__get__(self, self.__class__) - - return object.__getattribute__(self, key) - - # NOTE: This decorator MUST appear first (the outermost - # decorator) on an API method for it to work correctly - @classmethod - def api_version(cls, min_ver, max_ver=None): - """Decorator for versioning api methods. - - Add the decorator to any pecan method that has been exposed. 
- This decorator will store the method, min version, and max - version in a list for each api. It will check that there is no - overlap between versions and methods. When the api is called the - controller will use the list for each api to determine which - method to call. - - Example: - @base.Controller.api_version("1.1", "1.2") - @expose.expose(Cluster, types.uuid_or_name) - def get_one(self, cluster_ident): - {...code for versions 1.1 to 1.2...} - - @base.Controller.api_version("1.3") - @expose.expose(Cluster, types.uuid_or_name) - def get_one(self, cluster_ident): - {...code for versions 1.3 to latest...} - - @min_ver: string representing minimum version - @max_ver: optional string representing maximum version - @raises: ApiVersionsIntersect if a version overlap is found between - method versions. - """ - - def decorator(f): - obj_min_ver = versions.Version('', '', '', min_ver) - if max_ver: - obj_max_ver = versions.Version('', '', '', max_ver) - else: - obj_max_ver = versions.Version('', '', '', - versions.CURRENT_MAX_VER) - - # Add to list of versioned methods registered - func_name = f.__name__ - new_func = versioned_method.VersionedMethod( - func_name, obj_min_ver, obj_max_ver, f) - - func_dict = getattr(cls, VER_METHOD_ATTR, {}) - if not func_dict: - setattr(cls, VER_METHOD_ATTR, func_dict) - - func_list = func_dict.get(func_name, []) - if not func_list: - func_dict[func_name] = func_list - func_list.append(new_func) - - is_intersect = Controller.check_for_versions_intersection( - func_list) - - if is_intersect: - raise exception.ApiVersionsIntersect( - name=new_func.name, - min_ver=new_func.start_version, - max_ver=new_func.end_version - ) - - # Ensure the list is sorted by minimum version (reversed) - # so later when we work through the list in order we find - # the method which has the latest version which supports - # the version requested. - func_list.sort(key=lambda f: f.start_version, reverse=True) - - return f - - return decorator - - @staticmethod - def check_for_versions_intersection(func_list): - """Determines whether the versioned methods in func_list intersect - - General algorithm: - https://en.wikipedia.org/wiki/Intersection_algorithm - - :param func_list: list of VersionedMethod objects - :return: boolean - """ - - pairs = [] - counter = 0 - - for f in func_list: - pairs.append((f.start_version, 1)) - pairs.append((f.end_version, -1)) - - pairs.sort(key=operator.itemgetter(1), reverse=True) - pairs.sort(key=operator.itemgetter(0)) - - for p in pairs: - counter += p[1] - - if counter > 1: - return True - - return False diff --git a/magnum/api/controllers/link.py b/magnum/api/controllers/link.py deleted file mode 100644 index 5007b1cb..00000000 --- a/magnum/api/controllers/link.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
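A hedged worked example of the interval sweep used by ``check_for_versions_intersection`` above (versions are simplified to tuples, and the tie-breaking sort on equal boundaries is omitted):

.. code-block:: python

    # Each method contributes an open (+1) and close (-1) event; if more
    # than one interval is ever open at once, the ranges intersect.
    def intersects(pairs):
        counter = 0
        for _version, delta in sorted(pairs):
            counter += delta
            if counter > 1:
                return True
        return False

    # Methods covering 1.1-1.2 and 1.3-1.9: disjoint, no overlap.
    assert not intersects([((1, 1), 1), ((1, 2), -1),
                           ((1, 3), 1), ((1, 9), -1)])

    # Methods covering 1.1-1.5 and 1.3-1.9: both open at 1.3, so overlap.
    assert intersects([((1, 1), 1), ((1, 5), -1),
                       ((1, 3), 1), ((1, 9), -1)])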
- -import pecan -from wsme import types as wtypes - -from magnum.api.controllers import base - - -def build_url(resource, resource_args, bookmark=False, base_url=None): - if base_url is None: - base_url = pecan.request.host_url - - template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' - # FIXME(lucasagomes): I'm getting a 404 when doing a GET on - # a nested resource whose URL ends with a '/'. - # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs - template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' - return template % {'url': base_url, 'res': resource, 'args': resource_args} - - -class Link(base.APIBase): - """A link representation.""" - - href = wtypes.text - """The URL of a link.""" - - rel = wtypes.text - """The name of a link.""" - - type = wtypes.text - """Indicates the type of document/link.""" - - @staticmethod - def make_link(rel_name, url, resource, resource_args, - bookmark=False, type=wtypes.Unset): - href = build_url(resource, resource_args, - bookmark=bookmark, base_url=url) - return Link(href=href, rel=rel_name, type=type) - - @classmethod - def sample(cls): - sample = cls(href="http://localhost:9511/clusters/" - "eaaca217-e7d8-47b4-bb41-3f99f20eed89", - rel="bookmark") - return sample diff --git a/magnum/api/controllers/root.py b/magnum/api/controllers/root.py deleted file mode 100644 index 0d29117d..00000000 --- a/magnum/api/controllers/root.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
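A hedged usage sketch for ``build_url`` above, run outside a live pecan request (so ``base_url`` is passed explicitly; the host and UUID are illustrative):

.. code-block:: python

    # Bookmark links omit the version prefix.
    build_url('clusters', 'eaaca217-e7d8-47b4-bb41-3f99f20eed89',
              bookmark=True, base_url='http://localhost:9511')
    # -> 'http://localhost:9511/clusters/eaaca217-e7d8-47b4-bb41-3f99f20eed89'

    # Query-string arguments are appended without a separating slash.
    build_url('clusters', '?limit=2', base_url='http://localhost:9511')
    # -> 'http://localhost:9511/v1/clusters?limit=2'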
- -import pecan -from pecan import rest -from wsme import types as wtypes - -from magnum.api.controllers import base -from magnum.api.controllers import link -from magnum.api.controllers import v1 -from magnum.api.controllers import versions -from magnum.api import expose - - -class Version(base.APIBase): - """An API version representation.""" - - id = wtypes.text - """The ID of the version, also acts as the release number""" - - links = [link.Link] - """Links that point to a specific version of the API""" - - status = wtypes.text - """The current status of the version: CURRENT, SUPPORTED, UNSUPPORTED""" - - max_version = wtypes.text - """The max microversion supported by this version""" - - min_version = wtypes.text - """The min microversion supported by this version""" - - @staticmethod - def convert(id, status, max, min): - version = Version() - version.id = id - version.links = [link.Link.make_link('self', pecan.request.host_url, - id, '', bookmark=True)] - version.status = status - version.max_version = max - version.min_version = min - return version - - -class Root(base.APIBase): - - name = wtypes.text - """The name of the API""" - - description = wtypes.text - """Some information about this API""" - - versions = [Version] - """Links to all the versions available in this API""" - - @staticmethod - def convert(): - root = Root() - root.name = "OpenStack Magnum API" - root.description = ("Magnum is an OpenStack project which aims to " - "provide container cluster management.") - root.versions = [Version.convert('v1', "CURRENT", - versions.CURRENT_MAX_VER, - versions.BASE_VER)] - return root - - -class RootController(rest.RestController): - - _versions = ['v1'] - """All supported API versions""" - - _default_version = 'v1' - """The default API version""" - - v1 = v1.Controller() - - @expose.expose(Root) - def get(self): - # NOTE: The reason convert() is called for every request is that - # we need to get the host url from the request object to make - # the links. - return Root.convert() - - @pecan.expose() - def _route(self, args): - """Overrides the default routing behavior. - - It redirects the request to the default version of the magnum API - if the version number is not specified in the url. - """ - - if args[0] and args[0] not in self._versions: - args = [self._default_version] + args - return super(RootController, self)._route(args) diff --git a/magnum/api/controllers/v1/__init__.py b/magnum/api/controllers/v1/__init__.py deleted file mode 100644 index 3c99b4ee..00000000 --- a/magnum/api/controllers/v1/__init__.py +++ /dev/null @@ -1,238 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Version 1 of the Magnum API - -NOTE: IN PROGRESS AND NOT FULLY IMPLEMENTED.
-""" - -from oslo_log import log as logging -import pecan -from wsme import types as wtypes - -from magnum.api.controllers import base as controllers_base -from magnum.api.controllers import link -from magnum.api.controllers.v1 import bay -from magnum.api.controllers.v1 import baymodel -from magnum.api.controllers.v1 import certificate -from magnum.api.controllers.v1 import cluster -from magnum.api.controllers.v1 import cluster_template -from magnum.api.controllers.v1 import magnum_services -from magnum.api.controllers.v1 import quota -from magnum.api.controllers.v1 import stats -from magnum.api.controllers import versions as ver -from magnum.api import expose -from magnum.api import http_error -from magnum.i18n import _ - - -LOG = logging.getLogger(__name__) - -BASE_VERSION = 1 - -MIN_VER_STR = '%s %s' % (ver.Version.service_string, ver.BASE_VER) - -MAX_VER_STR = '%s %s' % (ver.Version.service_string, ver.CURRENT_MAX_VER) - -MIN_VER = ver.Version({ver.Version.string: MIN_VER_STR}, - MIN_VER_STR, MAX_VER_STR) -MAX_VER = ver.Version({ver.Version.string: MAX_VER_STR}, - MIN_VER_STR, MAX_VER_STR) - - -class MediaType(controllers_base.APIBase): - """A media type representation.""" - - base = wtypes.text - type = wtypes.text - - def __init__(self, base, type): - self.base = base - self.type = type - - -class V1(controllers_base.APIBase): - """The representation of the version 1 of the API.""" - - id = wtypes.text - """The ID of the version, also acts as the release number""" - - media_types = [MediaType] - """An array of supcontainersed media types for this version""" - - links = [link.Link] - """Links that point to a specific URL for this version and documentation""" - - baymodels = [link.Link] - """Links to the baymodels resource""" - - bays = [link.Link] - """Links to the bays resource""" - - clustertemplates = [link.Link] - """Links to the clustertemplates resource""" - - clusters = [link.Link] - """Links to the clusters resource""" - - quotas = [link.Link] - """Links to the quotas resource""" - - certificates = [link.Link] - """Links to the certificates resource""" - - mservices = [link.Link] - """Links to the magnum-services resource""" - - stats = [link.Link] - """Links to the stats resource""" - - @staticmethod - def convert(): - v1 = V1() - v1.id = "v1" - v1.links = [link.Link.make_link('self', pecan.request.host_url, - 'v1', '', bookmark=True), - link.Link.make_link('describedby', - 'http://docs.openstack.org', - 'developer/magnum/dev', - 'api-spec-v1.html', - bookmark=True, type='text/html')] - v1.media_types = [MediaType('application/json', - 'application/vnd.openstack.magnum.v1+json')] - v1.baymodels = [link.Link.make_link('self', pecan.request.host_url, - 'baymodels', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'baymodels', '', - bookmark=True)] - v1.bays = [link.Link.make_link('self', pecan.request.host_url, - 'bays', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'bays', '', - bookmark=True)] - v1.clustertemplates = [link.Link.make_link('self', - pecan.request.host_url, - 'clustertemplates', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'clustertemplates', '', - bookmark=True)] - v1.clusters = [link.Link.make_link('self', pecan.request.host_url, - 'clusters', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'clusters', '', - bookmark=True)] - v1.quotas = [link.Link.make_link('self', pecan.request.host_url, - 'quotas', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'quotas', '', 
- bookmark=True)] - v1.certificates = [link.Link.make_link('self', pecan.request.host_url, - 'certificates', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'certificates', '', - bookmark=True)] - v1.mservices = [link.Link.make_link('self', pecan.request.host_url, - 'mservices', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'mservices', '', - bookmark=True)] - v1.stats = [link.Link.make_link('self', pecan.request.host_url, - 'stats', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'stats', '', - bookmark=True)] - return v1 - - -class Controller(controllers_base.Controller): - """Version 1 API controller root.""" - - bays = bay.BaysController() - baymodels = baymodel.BayModelsController() - clusters = cluster.ClustersController() - clustertemplates = cluster_template.ClusterTemplatesController() - quotas = quota.QuotaController() - certificates = certificate.CertificateController() - mservices = magnum_services.MagnumServiceController() - stats = stats.StatsController() - - @expose.expose(V1) - def get(self): - # NOTE: The reason convert() is called for every request is that - # we need to get the host url from the request object to make - # the links. - return V1.convert() - - def _check_version(self, version, headers=None): - if headers is None: - headers = {} - # ensure that major version in the URL matches the header - if version.major != BASE_VERSION: - raise http_error.HTTPNotAcceptableAPIVersion(_( - "Mutually exclusive versions requested. Version %(ver)s " - "requested but not supported by this service. " - "The supported version range is: " - "[%(min)s, %(max)s].") % {'ver': version, - 'min': MIN_VER_STR, - 'max': MAX_VER_STR}, - headers=headers, - max_version=str(MAX_VER), - min_version=str(MIN_VER)) - # ensure the minor version is within the supported range - if version < MIN_VER or version > MAX_VER: - raise http_error.HTTPNotAcceptableAPIVersion(_( - "Version %(ver)s was requested but the minor version is not " - "supported by this service. The supported version range is: " - "[%(min)s, %(max)s].") % {'ver': version, 'min': MIN_VER_STR, - 'max': MAX_VER_STR}, - headers=headers, - max_version=str(MAX_VER), - min_version=str(MIN_VER)) - - @pecan.expose() - def _route(self, args): - version = ver.Version( - pecan.request.headers, MIN_VER_STR, MAX_VER_STR) - - # Always set the basic version headers - pecan.response.headers[ver.Version.min_string] = MIN_VER_STR - pecan.response.headers[ver.Version.max_string] = MAX_VER_STR - pecan.response.headers[ver.Version.string] = " ".join( - [ver.Version.service_string, str(version)]) - pecan.response.headers["vary"] = ver.Version.string - - # assert that requested version is supported - self._check_version(version, pecan.response.headers) - pecan.request.version = version - if pecan.request.body: - msg = ("Processing request: url: %(url)s, %(method)s, " - "body: %(body)s" % - {'url': pecan.request.url, - 'method': pecan.request.method, - 'body': pecan.request.body}) - LOG.debug(msg) - - return super(Controller, self)._route(args) - - -__all__ = ('Controller',) diff --git a/magnum/api/controllers/v1/bay.py b/magnum/api/controllers/v1/bay.py deleted file mode 100755 index c39ae8f4..00000000 --- a/magnum/api/controllers/v1/bay.py +++ /dev/null @@ -1,542 +0,0 @@ -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from oslo_log import log as logging -from oslo_utils import timeutils -import pecan -import wsme -from wsme import types as wtypes - -from magnum.api import attr_validator -from magnum.api.controllers import base -from magnum.api.controllers import link -from magnum.api.controllers.v1 import collection -from magnum.api.controllers.v1 import types -from magnum.api import expose -from magnum.api import utils as api_utils -from magnum.api.validation import validate_cluster_properties -from magnum.common import clients -from magnum.common import exception -from magnum.common import name_generator -from magnum.common import policy -from magnum import objects -from magnum.objects import fields - -LOG = logging.getLogger(__name__) - - -class BayID(wtypes.Base): - uuid = types.uuid - - def __init__(self, uuid): - self.uuid = uuid - - -class Bay(base.APIBase): - """API representation of a bay. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a bay. - """ - - _baymodel_id = None - - def _get_baymodel_id(self): - return self._baymodel_id - - def _set_baymodel_id(self, value): - if value and self._baymodel_id != value: - try: - baymodel = api_utils.get_resource('ClusterTemplate', value) - self._baymodel_id = baymodel.uuid - except exception.ClusterTemplateNotFound as e: - # Change error code because 404 (NotFound) is inappropriate - # response for a POST request to create a Cluster - e.code = 400 # BadRequest - raise - elif value == wtypes.Unset: - self._baymodel_id = wtypes.Unset - - uuid = types.uuid - """Unique UUID for this bay""" - - name = wtypes.StringType(min_length=1, max_length=242, - pattern='^[a-zA-Z][a-zA-Z0-9_.-]*$') - """Name of this bay. The max length is 242 rather than 255 because Heat - limits stack names to 255 characters and Magnum appends a uuid-length - suffix""" - - baymodel_id = wsme.wsproperty(wtypes.text, _get_baymodel_id, - _set_baymodel_id, mandatory=True) - """The baymodel UUID""" - - node_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) - """The node count for this bay. Default to 1 if not set""" - - master_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) - """The number of master nodes for this bay. Default to 1 if not set""" - - docker_volume_size = wtypes.IntegerType(minimum=1) - """The size in GB of the docker volume""" - - bay_create_timeout = wsme.wsattr(wtypes.IntegerType(minimum=0), default=60) - """Timeout for creating the bay in minutes.
Default to 60 if not set""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated bay links""" - - stack_id = wsme.wsattr(wtypes.text, readonly=True) - """Stack id of the heat stack""" - - status = wtypes.Enum(str, *fields.ClusterStatus.ALL) - """Status of the bay from the heat stack""" - - status_reason = wtypes.text - """Status reason of the bay from the heat stack""" - - discovery_url = wtypes.text - """Url used for bay node discovery""" - - api_address = wsme.wsattr(wtypes.text, readonly=True) - """Api address of cluster master node""" - - coe_version = wsme.wsattr(wtypes.text, readonly=True) - """Version of the COE software currently running in this cluster. - Example: swarm version or kubernetes version.""" - - container_version = wsme.wsattr(wtypes.text, readonly=True) - """Version of the container software. Example: docker version.""" - - node_addresses = wsme.wsattr([wtypes.text], readonly=True) - """IP addresses of cluster slave nodes""" - - master_addresses = wsme.wsattr([wtypes.text], readonly=True) - """IP addresses of cluster master nodes""" - - bay_faults = wsme.wsattr(wtypes.DictType(str, wtypes.text)) - """Fault info collected from the heat resources of this bay""" - - def __init__(self, **kwargs): - super(Bay, self).__init__() - - self.fields = [] - for field in objects.Cluster.fields: - # Skip fields we do not expose. - if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - # Set the renamed attributes for bay backwards compatibility - self.fields.append('baymodel_id') - if 'baymodel_id' in kwargs.keys(): - setattr(self, 'cluster_template_id', - kwargs.get('baymodel_id', None)) - setattr(self, 'baymodel_id', - kwargs.get('baymodel_id', None)) - else: - setattr(self, 'baymodel_id', kwargs.get('cluster_template_id', - None)) - - self.fields.append('bay_create_timeout') - if 'bay_create_timeout' in kwargs.keys(): - setattr(self, 'create_timeout', - kwargs.get('bay_create_timeout', wtypes.Unset)) - setattr(self, 'bay_create_timeout', - kwargs.get('bay_create_timeout', wtypes.Unset)) - else: - setattr(self, 'bay_create_timeout', kwargs.get('create_timeout', - wtypes.Unset)) - - self.fields.append('bay_faults') - if 'bay_faults' in kwargs.keys(): - setattr(self, 'faults', - kwargs.get('bay_faults', wtypes.Unset)) - setattr(self, 'bay_faults', - kwargs.get('bay_faults', wtypes.Unset)) - else: - setattr(self, 'bay_faults', kwargs.get('faults', wtypes.Unset)) - - @staticmethod - def _convert_with_links(bay, url, expand=True): - if not expand: - bay.unset_fields_except(['uuid', 'name', 'baymodel_id', - 'docker_volume_size', - 'node_count', 'status', - 'bay_create_timeout', 'master_count', - 'stack_id']) - - bay.links = [link.Link.make_link('self', url, - 'bays', bay.uuid), - link.Link.make_link('bookmark', url, - 'bays', bay.uuid, - bookmark=True)] - return bay - - @classmethod - def convert_with_links(cls, rpc_bay, expand=True): - bay = Bay(**rpc_bay.as_dict()) - return cls._convert_with_links(bay, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - name='example', - baymodel_id='4a96ac4b-2447-43f1-8ca6-9fd6f36d146d', - node_count=2, - master_count=1, - docker_volume_size=1, - bay_create_timeout=15, - stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63', - status=fields.ClusterStatus.CREATE_COMPLETE, - status_reason="CREATE completed successfully", - api_address='172.24.4.3', 
- node_addresses=['172.24.4.4', '172.24.4.5'], - created_at=timeutils.utcnow(), - updated_at=timeutils.utcnow(), - coe_version=None, - container_version=None) - return cls._convert_with_links(sample, 'http://localhost:9511', expand) - - def as_dict(self): - """Render this object as a dict of its fields.""" - - # Override this for old bay values - d = super(Bay, self).as_dict() - - d['cluster_template_id'] = d['baymodel_id'] - del d['baymodel_id'] - - d['create_timeout'] = d['bay_create_timeout'] - del d['bay_create_timeout'] - - if 'bay_faults' in d.keys(): - d['faults'] = d['bay_faults'] - del d['bay_faults'] - - return d - - -class BayPatchType(types.JsonPatchType): - _api_base = Bay - - @staticmethod - def internal_attrs(): - internal_attrs = ['/api_address', '/node_addresses', - '/master_addresses', '/stack_id', - '/ca_cert_ref', '/magnum_cert_ref', - '/trust_id', '/trustee_user_name', - '/trustee_password', '/trustee_user_id'] - return types.JsonPatchType.internal_attrs() + internal_attrs - - -class BayCollection(collection.Collection): - """API representation of a collection of bays.""" - - bays = [Bay] - """A list containing bays objects""" - - def __init__(self, **kwargs): - self._type = 'bays' - - @staticmethod - def convert_with_links(rpc_bays, limit, url=None, expand=False, **kwargs): - collection = BayCollection() - collection.bays = [Bay.convert_with_links(p, expand) - for p in rpc_bays] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - @classmethod - def sample(cls): - sample = cls() - sample.bays = [Bay.sample(expand=False)] - return sample - - -class BaysController(base.Controller): - """REST controller for Bays.""" - def __init__(self): - super(BaysController, self).__init__() - - _custom_actions = { - 'detail': ['GET'], - } - - def _generate_name_for_bay(self, context): - '''Generate a random name like: zeta-22-bay.''' - name_gen = name_generator.NameGenerator() - name = name_gen.generate() - return name + '-bay' - - def _get_bays_collection(self, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Cluster.get_by_uuid(pecan.request.context, - marker) - - bays = objects.Cluster.list(pecan.request.context, limit, - marker_obj, sort_key=sort_key, - sort_dir=sort_dir) - - return BayCollection.convert_with_links(bays, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @expose.expose(BayCollection, types.uuid, int, wtypes.text, - wtypes.text) - def get_all(self, marker=None, limit=None, sort_key='id', - sort_dir='asc'): - """Retrieve a list of bays. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'bay:get_all', - action='bay:get_all') - return self._get_bays_collection(marker, limit, sort_key, - sort_dir) - - @expose.expose(BayCollection, types.uuid, int, wtypes.text, - wtypes.text) - def detail(self, marker=None, limit=None, sort_key='id', - sort_dir='asc'): - """Retrieve a list of bays with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. 
- :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'bay:detail', - action='bay:detail') - - # NOTE(lucasagomes): /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "bays": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['bays', 'detail']) - return self._get_bays_collection(marker, limit, - sort_key, sort_dir, expand, - resource_url) - - def _collect_fault_info(self, context, bay): - """Collect fault info from heat resources of given bay - - and store them into bay.bay_faults. - """ - osc = clients.OpenStackClients(context) - filters = {'status': 'FAILED'} - try: - failed_resources = osc.heat().resources.list( - bay.stack_id, nested_depth=2, filters=filters) - except Exception as e: - failed_resources = [] - LOG.warning("Failed to retrieve failed resources for " - "bay %(bay)s from Heat stack %(stack)s " - "due to error: %(e)s", - {'bay': bay.uuid, 'stack': bay.stack_id, 'e': e}, - exc_info=True) - - return {res.resource_name: res.resource_status_reason - for res in failed_resources} - - @expose.expose(Bay, types.uuid_or_name) - def get_one(self, bay_ident): - """Retrieve information about the given bay. - - :param bay_ident: UUID of a bay or logical name of the bay. - """ - context = pecan.request.context - bay = api_utils.get_resource('Cluster', bay_ident) - policy.enforce(context, 'bay:get', bay.as_dict(), - action='bay:get') - - bay = Bay.convert_with_links(bay) - - if bay.status in fields.ClusterStatus.STATUS_FAILED: - bay.bay_faults = self._collect_fault_info(context, bay) - - return bay - - @base.Controller.api_version("1.1", "1.1") - @expose.expose(Bay, body=Bay, status_code=201) - def post(self, bay): - """Create a new bay. - - :param bay: a bay within the request body. - """ - new_bay = self._post(bay) - res_bay = pecan.request.rpcapi.cluster_create(new_bay, - bay.bay_create_timeout) - - # Set the HTTP Location Header - pecan.response.location = link.build_url('bays', res_bay.uuid) - return Bay.convert_with_links(res_bay) - - @base.Controller.api_version("1.2") # noqa - @expose.expose(BayID, body=Bay, status_code=202) - def post(self, bay): - """Create a new bay. - - :param bay: a bay within the request body. - """ - new_bay = self._post(bay) - pecan.request.rpcapi.cluster_create_async(new_bay, - bay.bay_create_timeout) - return BayID(new_bay.uuid) - - def _post(self, bay): - context = pecan.request.context - policy.enforce(context, 'bay:create', - action='bay:create') - baymodel = objects.ClusterTemplate.get_by_uuid(context, - bay.baymodel_id) - - # If docker_volume_size is not present, use baymodel value - if bay.docker_volume_size == wtypes.Unset: - bay.docker_volume_size = baymodel.docker_volume_size - - bay_dict = bay.as_dict() - bay_dict['keypair'] = baymodel.keypair_id - attr_validator.validate_os_resources(context, baymodel.as_dict(), - bay_dict) - attr_validator.validate_master_count(bay.as_dict(), baymodel.as_dict()) - - bay_dict['project_id'] = context.project_id - bay_dict['user_id'] = context.user_id - # NOTE(yuywz): We will generate a random human-readable name for - # bay if the name is not specified by the user.
- name = bay_dict.get('name') or self._generate_name_for_bay(context) - bay_dict['name'] = name - bay_dict['coe_version'] = None - bay_dict['container_version'] = None - new_bay = objects.Cluster(context, **bay_dict) - new_bay.uuid = uuid.uuid4() - return new_bay - - @base.Controller.api_version("1.1", "1.1") - @wsme.validate(types.uuid, [BayPatchType]) - @expose.expose(Bay, types.uuid_or_name, body=[BayPatchType]) - def patch(self, bay_ident, patch): - """Update an existing bay. - - :param bay_ident: UUID or logical name of a bay. - :param patch: a json PATCH document to apply to this bay. - """ - bay = self._patch(bay_ident, patch) - res_bay = pecan.request.rpcapi.cluster_update(bay) - return Bay.convert_with_links(res_bay) - - @base.Controller.api_version("1.2", "1.2") # noqa - @wsme.validate(types.uuid, [BayPatchType]) - @expose.expose(BayID, types.uuid_or_name, body=[BayPatchType], - status_code=202) - def patch(self, bay_ident, patch): - """Update an existing bay. - - :param bay_ident: UUID or logical name of a bay. - :param patch: a json PATCH document to apply to this bay. - """ - bay = self._patch(bay_ident, patch) - pecan.request.rpcapi.cluster_update_async(bay) - return BayID(bay.uuid) - - @base.Controller.api_version("1.3") # noqa - @wsme.validate(types.uuid, bool, [BayPatchType]) - @expose.expose(BayID, types.uuid_or_name, types.boolean, - body=[BayPatchType], status_code=202) - def patch(self, bay_ident, rollback=False, patch=None): - """Update an existing bay. - - :param bay_ident: UUID or logical name of a bay. - :param rollback: whether to rollback bay on update failure. - :param patch: a json PATCH document to apply to this bay. - """ - bay = self._patch(bay_ident, patch) - pecan.request.rpcapi.cluster_update_async(bay, rollback=rollback) - return BayID(bay.uuid) - - def _patch(self, bay_ident, patch): - context = pecan.request.context - bay = api_utils.get_resource('Cluster', bay_ident) - policy.enforce(context, 'bay:update', bay.as_dict(), - action='bay:update') - try: - bay_dict = bay.as_dict() - new_bay = Bay(**api_utils.apply_jsonpatch(bay_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - for field in objects.Cluster.fields: - try: - patch_val = getattr(new_bay, field) - except AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if bay[field] != patch_val: - bay[field] = patch_val - - delta = bay.obj_what_changed() - - validate_cluster_properties(delta) - return bay - - @base.Controller.api_version("1.1", "1.1") - @expose.expose(None, types.uuid_or_name, status_code=204) - def delete(self, bay_ident): - """Delete a bay. - - :param bay_ident: UUID of a bay or logical name of the bay. - """ - bay = self._delete(bay_ident) - - pecan.request.rpcapi.cluster_delete(bay.uuid) - - @base.Controller.api_version("1.2") # noqa - @expose.expose(None, types.uuid_or_name, status_code=204) - def delete(self, bay_ident): - """Delete a bay. - - :param bay_ident: UUID of a bay or logical name of the bay. 
- """ - bay = self._delete(bay_ident) - - pecan.request.rpcapi.cluster_delete_async(bay.uuid) - - def _delete(self, bay_ident): - context = pecan.request.context - bay = api_utils.get_resource('Cluster', bay_ident) - policy.enforce(context, 'bay:delete', bay.as_dict(), - action='bay:delete') - return bay diff --git a/magnum/api/controllers/v1/baymodel.py b/magnum/api/controllers/v1/baymodel.py deleted file mode 100644 index 7e229e43..00000000 --- a/magnum/api/controllers/v1/baymodel.py +++ /dev/null @@ -1,415 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import timeutils -import pecan -import wsme -from wsme import types as wtypes - -from magnum.api import attr_validator -from magnum.api.controllers import base -from magnum.api.controllers import link -from magnum.api.controllers.v1 import collection -from magnum.api.controllers.v1 import types -from magnum.api import expose -from magnum.api import utils as api_utils -from magnum.api import validation -from magnum.common import clients -from magnum.common import exception -from magnum.common import name_generator -from magnum.common import policy -from magnum import objects -from magnum.objects import fields - - -class BayModel(base.APIBase): - """API representation of a Baymodel. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a Baymodel. 
- """ - - uuid = types.uuid - """Unique UUID for this Baymodel""" - - name = wtypes.StringType(min_length=1, max_length=255) - """The name of the Baymodel""" - - coe = wtypes.Enum(str, *fields.ClusterType.ALL, mandatory=True) - """The Container Orchestration Engine for this bay model""" - - image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), - mandatory=True) - """The image name or UUID to use as a base image for this Baymodel""" - - flavor_id = wtypes.StringType(min_length=1, max_length=255) - """The flavor of this Baymodel""" - - master_flavor_id = wtypes.StringType(min_length=1, max_length=255) - """The flavor of the master node for this Baymodel""" - - dns_nameserver = wtypes.IPv4AddressType() - """The DNS nameserver address""" - - keypair_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), - mandatory=True) - """The name of the nova ssh keypair""" - - external_network_id = wtypes.StringType(min_length=1, max_length=255) - """The external network to attach to the Bay""" - - fixed_network = wtypes.StringType(min_length=1, max_length=255) - """The fixed network name to attach to the Bay""" - - fixed_subnet = wtypes.StringType(min_length=1, max_length=255) - """The fixed subnet name to attach to the Bay""" - - network_driver = wtypes.StringType(min_length=1, max_length=255) - """The name of the driver used for instantiating container networks""" - - apiserver_port = wtypes.IntegerType(minimum=1024, maximum=65535) - """The API server port for k8s""" - - docker_volume_size = wtypes.IntegerType(minimum=1) - """The size in GB of the docker volume""" - - cluster_distro = wtypes.StringType(min_length=1, max_length=255) - """The Cluster distro for the bay, e.g. coreos, fedora-atomic, etc.""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated Baymodel links""" - - http_proxy = wtypes.StringType(min_length=1, max_length=255) - """Address of a proxy that will receive all HTTP requests and relay them. - The format is a URL including a port number. - """ - - https_proxy = wtypes.StringType(min_length=1, max_length=255) - """Address of a proxy that will receive all HTTPS requests and relay them. - The format is a URL including a port number. - """ - - no_proxy = wtypes.StringType(min_length=1, max_length=255) - """A comma separated list of IPs for which proxies should not be - used in the bay - """ - - volume_driver = wtypes.StringType(min_length=1, max_length=255) - """The name of the driver used for instantiating container volumes""" - - registry_enabled = wsme.wsattr(types.boolean, default=False) - """Indicates whether the docker registry is enabled""" - - labels = wtypes.DictType(str, str) - """One or more key/value pairs""" - - tls_disabled = wsme.wsattr(types.boolean, default=False) - """Indicates whether TLS should be disabled""" - - public = wsme.wsattr(types.boolean, default=False) - """Indicates whether the Baymodel is public or not.""" - - server_type = wsme.wsattr(wtypes.Enum(str, *fields.ServerType.ALL), - default='vm') - """Server type for this bay model""" - - insecure_registry = wtypes.StringType(min_length=1, max_length=255) - """Insecure registry URL when creating a Baymodel""" - - docker_storage_driver = wtypes.Enum(str, *fields.DockerStorageDriver.ALL) - """Docker storage driver""" - - master_lb_enabled = wsme.wsattr(types.boolean, default=False) - """Indicates whether created bays should have a load balancer for master - nodes or not. 
- """ - - floating_ip_enabled = wsme.wsattr(types.boolean, default=True) - """Indicates whether created bays should have a floating ip or not.""" - - def __init__(self, **kwargs): - self.fields = [] - for field in objects.ClusterTemplate.fields: - # Skip fields we do not expose. - if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - @staticmethod - def _convert_with_links(baymodel, url): - baymodel.links = [link.Link.make_link('self', url, - 'baymodels', baymodel.uuid), - link.Link.make_link('bookmark', url, - 'baymodels', baymodel.uuid, - bookmark=True)] - return baymodel - - @classmethod - def convert_with_links(cls, rpc_baymodel): - baymodel = BayModel(**rpc_baymodel.as_dict()) - return cls._convert_with_links(baymodel, pecan.request.host_url) - - @classmethod - def sample(cls): - sample = cls( - uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - name='example', - image_id='Fedora-k8s', - flavor_id='m1.small', - master_flavor_id='m1.small', - dns_nameserver='8.8.1.1', - keypair_id='keypair1', - external_network_id='ffc44e4a-2319-4062-bce0-9ae1c38b05ba', - fixed_network='private', - fixed_subnet='private-subnet', - network_driver='libnetwork', - volume_driver='cinder', - apiserver_port=8080, - docker_volume_size=25, - docker_storage_driver='devicemapper', - cluster_distro='fedora-atomic', - coe=fields.ClusterType.KUBERNETES, - http_proxy='http://proxy.com:123', - https_proxy='https://proxy.com:123', - no_proxy='192.168.0.1,192.168.0.2,192.168.0.3', - labels={'key1': 'val1', 'key2': 'val2'}, - server_type='vm', - insecure_registry='10.238.100.100:5000', - created_at=timeutils.utcnow(), - updated_at=timeutils.utcnow(), - public=False, - master_lb_enabled=False, - floating_ip_enabled=True, - ) - return cls._convert_with_links(sample, 'http://localhost:9511') - - -class BayModelPatchType(types.JsonPatchType): - _api_base = BayModel - _extra_non_removable_attrs = {'/network_driver', '/external_network_id', - '/tls_disabled', '/public', '/server_type', - '/coe', '/registry_enabled', - '/cluster_distro'} - - -class BayModelCollection(collection.Collection): - """API representation of a collection of Baymodels.""" - - baymodels = [BayModel] - """A list containing Baymodel objects""" - - def __init__(self, **kwargs): - self._type = 'baymodels' - - @staticmethod - def convert_with_links(rpc_baymodels, limit, url=None, **kwargs): - collection = BayModelCollection() - collection.baymodels = [BayModel.convert_with_links(p) - for p in rpc_baymodels] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - @classmethod - def sample(cls): - sample = cls() - sample.baymodels = [BayModel.sample()] - return sample - - -class BayModelsController(base.Controller): - """REST controller for Baymodels.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _generate_name_for_baymodel(self, context): - '''Generate a random name like: zeta-22-model.''' - - name_gen = name_generator.NameGenerator() - name = name_gen.generate() - return name + '-model' - - def _get_baymodels_collection(self, marker, limit, - sort_key, sort_dir, resource_url=None): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.ClusterTemplate.get_by_uuid( - pecan.request.context, marker) - - baymodels = objects.ClusterTemplate.list(pecan.request.context, limit, - marker_obj, sort_key=sort_key, - sort_dir=sort_dir) - - return 
BayModelCollection.convert_with_links(baymodels, limit, - url=resource_url, - sort_key=sort_key, - sort_dir=sort_dir) - - @expose.expose(BayModelCollection, types.uuid, int, wtypes.text, - wtypes.text) - def get_all(self, marker=None, limit=None, sort_key='id', - sort_dir='asc'): - """Retrieve a list of Baymodels. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'baymodel:get_all', - action='baymodel:get_all') - return self._get_baymodels_collection(marker, limit, sort_key, - sort_dir) - - @expose.expose(BayModelCollection, types.uuid, int, wtypes.text, - wtypes.text) - def detail(self, marker=None, limit=None, sort_key='id', - sort_dir='asc'): - """Retrieve a list of Baymodels with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'baymodel:detail', - action='baymodel:detail') - - # NOTE(lucasagomes): /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "baymodels": - raise exception.HTTPNotFound - - resource_url = '/'.join(['baymodels', 'detail']) - return self._get_baymodels_collection(marker, limit, - sort_key, sort_dir, resource_url) - - @expose.expose(BayModel, types.uuid_or_name) - def get_one(self, baymodel_ident): - """Retrieve information about the given Baymodel. - - :param baymodel_ident: UUID or logical name of a baymodel. - """ - context = pecan.request.context - baymodel = api_utils.get_resource('ClusterTemplate', baymodel_ident) - if not baymodel.public: - policy.enforce(context, 'baymodel:get', baymodel.as_dict(), - action='baymodel:get') - - return BayModel.convert_with_links(baymodel) - - @expose.expose(BayModel, body=BayModel, status_code=201) - @validation.enforce_server_type() - @validation.enforce_network_driver_types_create() - @validation.enforce_volume_driver_types_create() - @validation.enforce_volume_storage_size_create() - def post(self, baymodel): - """Create a new Baymodel. - - :param baymodel: a Baymodel within the request body. - """ - context = pecan.request.context - policy.enforce(context, 'baymodel:create', - action='baymodel:create') - baymodel_dict = baymodel.as_dict() - cli = clients.OpenStackClients(context) - attr_validator.validate_os_resources(context, baymodel_dict) - image_data = attr_validator.validate_image(cli, - baymodel_dict['image_id']) - baymodel_dict['cluster_distro'] = image_data['os_distro'] - baymodel_dict['project_id'] = context.project_id - baymodel_dict['user_id'] = context.user_id - # check permissions for making baymodel public - if baymodel_dict['public']: - if not policy.enforce(context, "baymodel:publish", None, - do_raise=False): - raise exception.ClusterTemplatePublishDenied() - - # NOTE(yuywz): We will generate a random human-readable name for - # baymodel if the name is not specified by user.
- arg_name = baymodel_dict.get('name') - name = arg_name or self._generate_name_for_baymodel(context) - baymodel_dict['name'] = name - - new_baymodel = objects.ClusterTemplate(context, **baymodel_dict) - new_baymodel.create() - # Set the HTTP Location Header - pecan.response.location = link.build_url('baymodels', - new_baymodel.uuid) - return BayModel.convert_with_links(new_baymodel) - - @wsme.validate(types.uuid_or_name, [BayModelPatchType]) - @expose.expose(BayModel, types.uuid_or_name, body=[BayModelPatchType]) - @validation.enforce_network_driver_types_update() - @validation.enforce_volume_driver_types_update() - def patch(self, baymodel_ident, patch): - """Update an existing Baymodel. - - :param baymodel_ident: UUID or logical name of a Baymodel. - :param patch: a json PATCH document to apply to this Baymodel. - """ - context = pecan.request.context - baymodel = api_utils.get_resource('ClusterTemplate', baymodel_ident) - policy.enforce(context, 'baymodel:update', baymodel.as_dict(), - action='baymodel:update') - try: - baymodel_dict = baymodel.as_dict() - new_baymodel = BayModel(**api_utils.apply_jsonpatch( - baymodel_dict, - patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - new_baymodel_dict = new_baymodel.as_dict() - attr_validator.validate_os_resources(context, new_baymodel_dict) - # check permissions when updating baymodel public flag - if baymodel.public != new_baymodel.public: - if not policy.enforce(context, "baymodel:publish", None, - do_raise=False): - raise exception.ClusterTemplatePublishDenied() - - # Update only the fields that have changed - for field in objects.ClusterTemplate.fields: - try: - patch_val = getattr(new_baymodel, field) - except AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if baymodel[field] != patch_val: - baymodel[field] = patch_val - - baymodel.save() - return BayModel.convert_with_links(baymodel) - - @expose.expose(None, types.uuid_or_name, status_code=204) - def delete(self, baymodel_ident): - """Delete a Baymodel. - - :param baymodel_ident: UUID or logical name of a Baymodel. - """ - context = pecan.request.context - baymodel = api_utils.get_resource('ClusterTemplate', baymodel_ident) - policy.enforce(context, 'baymodel:delete', baymodel.as_dict(), - action='baymodel:delete') - baymodel.destroy() diff --git a/magnum/api/controllers/v1/certificate.py b/magnum/api/controllers/v1/certificate.py deleted file mode 100644 index 069cf122..00000000 --- a/magnum/api/controllers/v1/certificate.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
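Before the certificate controller begins: the Baymodel patch() handler above delegates to api_utils.apply_jsonpatch and converts patch failures into a PatchError. A minimal standalone sketch of that apply-then-diff flow, assuming the jsonpatch library that api_utils wraps; the document and patch values here are illustrative, not taken from a real request:

    import jsonpatch

    # Illustrative starting document and RFC 6902 patch.
    baymodel_doc = {'name': 'example', 'coe': 'kubernetes', 'public': False}
    patch = [{'op': 'replace', 'path': '/name', 'value': 'renamed-model'}]

    try:
        # apply_patch returns a new document and leaves the original
        # untouched, mirroring how the controller builds a new BayModel
        # from the patched dict.
        new_doc = jsonpatch.apply_patch(baymodel_doc, patch)
    except jsonpatch.JsonPatchException as e:
        # The controller turns these failures into a client-facing PatchError.
        raise ValueError('invalid patch: %s' % e)

    # Only fields that actually changed are copied back before save(),
    # as in the field-by-field loop above.
    changed = {k: v for k, v in new_doc.items() if baymodel_doc.get(k) != v}
    print(changed)  # {'name': 'renamed-model'}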
- -from oslo_utils import timeutils -import pecan -import wsme -from wsme import types as wtypes - -from magnum.api.controllers import base -from magnum.api.controllers import link -from magnum.api.controllers.v1 import types -from magnum.api import expose -from magnum.api import utils as api_utils -from magnum.common import exception -from magnum.common import policy -from magnum import objects - - -class Certificate(base.APIBase): - """API representation of a certificate. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a - certificate. - """ - - _cluster_uuid = None - """uuid or logical name of cluster""" - - _cluster = None - - def _get_cluster_uuid(self): - return self._cluster_uuid - - def _set_cluster_uuid(self, value): - if value and self._cluster_uuid != value: - try: - self._cluster = api_utils.get_resource('Cluster', value) - self._cluster_uuid = self._cluster.uuid - except exception.ClusterNotFound as e: - # Change error code because 404 (NotFound) is inappropriate - # response for a POST request to create a Certificate - e.code = 400 # BadRequest - raise - elif value == wtypes.Unset: - self._cluster_uuid = wtypes.Unset - - bay_uuid = wsme.wsproperty(wtypes.text, _get_cluster_uuid, - _set_cluster_uuid) - """The bay UUID or id""" - - cluster_uuid = wsme.wsproperty(wtypes.text, _get_cluster_uuid, - _set_cluster_uuid) - """The cluster UUID or id""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated certificate links""" - - csr = wtypes.StringType(min_length=1) - """The Certificate Signing Request""" - - pem = wtypes.StringType() - """The Signed Certificate""" - - def __init__(self, **kwargs): - super(Certificate, self).__init__() - - self.fields = [] - for field in objects.Certificate.fields: - # Skip fields we do not expose.
- if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - # set the attribute for bay_uuid for backwards compatibility - self.fields.append('bay_uuid') - setattr(self, 'bay_uuid', kwargs.get('bay_uuid', self._cluster_uuid)) - - def get_cluster(self): - if not self._cluster: - self._cluster = api_utils.get_resource('Cluster', - self.cluster_uuid) - return self._cluster - - @staticmethod - def _convert_with_links(certificate, url, expand=True): - if not expand: - certificate.unset_fields_except(['bay_uuid', 'cluster_uuid', - 'csr', 'pem']) - - certificate.links = [link.Link.make_link('self', url, - 'certificates', - certificate.cluster_uuid), - link.Link.make_link('bookmark', url, - 'certificates', - certificate.cluster_uuid, - bookmark=True)] - return certificate - - @classmethod - def convert_with_links(cls, rpc_cert, expand=True): - cert = Certificate(**rpc_cert.as_dict()) - return cls._convert_with_links(cert, - pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(bay_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae', - cluster_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae', - created_at=timeutils.utcnow(), - csr='AAA....AAA') - return cls._convert_with_links(sample, 'http://localhost:9511', expand) - - -class CertificateController(base.Controller): - """REST controller for Certificate.""" - - def __init__(self): - super(CertificateController, self).__init__() - - _custom_actions = { - 'detail': ['GET'], - } - - @expose.expose(Certificate, types.uuid_or_name) - def get_one(self, cluster_ident): - """Retrieve CA information about the given cluster. - - :param cluster_ident: UUID of a cluster or - logical name of the cluster. - """ - context = pecan.request.context - cluster = api_utils.get_resource('Cluster', cluster_ident) - policy.enforce(context, 'certificate:get', cluster.as_dict(), - action='certificate:get') - certificate = pecan.request.rpcapi.get_ca_certificate(cluster) - return Certificate.convert_with_links(certificate) - - @expose.expose(Certificate, body=Certificate, status_code=201) - def post(self, certificate): - """Sign a new certificate by the CA. - - :param certificate: a certificate within the request body. - """ - context = pecan.request.context - cluster = certificate.get_cluster() - policy.enforce(context, 'certificate:create', cluster.as_dict(), - action='certificate:create') - certificate_dict = certificate.as_dict() - certificate_dict['project_id'] = context.project_id - certificate_dict['user_id'] = context.user_id - cert_obj = objects.Certificate(context, **certificate_dict) - - new_cert = pecan.request.rpcapi.sign_certificate(cluster, - cert_obj) - return Certificate.convert_with_links(new_cert) - - @expose.expose(None, types.uuid_or_name, status_code=202) - def patch(self, cluster_ident): - context = pecan.request.context - cluster = api_utils.get_resource('Cluster', cluster_ident) - policy.enforce(context, 'certificate:rotate_ca', cluster.as_dict(), - action='certificate:rotate_ca') - if cluster.cluster_template.tls_disabled: - raise exception.NotSupported("Rotating the CA certificate on a " - "non-TLS cluster is not supported") - pecan.request.rpcapi.rotate_ca_certificate(cluster) diff --git a/magnum/api/controllers/v1/cluster.py b/magnum/api/controllers/v1/cluster.py deleted file mode 100755 index e8753354..00000000 --- a/magnum/api/controllers/v1/cluster.py +++ /dev/null @@ -1,500 +0,0 @@ -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from oslo_log import log as logging -from oslo_utils import timeutils -import pecan -import wsme -from wsme import types as wtypes - -from magnum.api import attr_validator -from magnum.api.controllers import base -from magnum.api.controllers import link -from magnum.api.controllers.v1 import collection -from magnum.api.controllers.v1 import types -from magnum.api import expose -from magnum.api import utils as api_utils -from magnum.api import validation -from magnum.common import clients -from magnum.common import exception -from magnum.common import name_generator -from magnum.common import policy -import magnum.conf -from magnum.i18n import _ -from magnum import objects -from magnum.objects import fields - -LOG = logging.getLogger(__name__) -CONF = magnum.conf.CONF - - -class ClusterID(wtypes.Base): - """API representation of a cluster ID - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a cluster - ID. - """ - - uuid = types.uuid - """Unique UUID for this cluster""" - - def __init__(self, uuid): - self.uuid = uuid - - -class Cluster(base.APIBase): - """API representation of a cluster. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a Cluster. - """ - - _cluster_template_id = None - - def _get_cluster_template_id(self): - return self._cluster_template_id - - def _set_cluster_template_id(self, value): - if value and self._cluster_template_id != value: - try: - cluster_template = api_utils.get_resource('ClusterTemplate', - value) - self._cluster_template_id = cluster_template.uuid - except exception.ClusterTemplateNotFound as e: - # Change error code because 404 (NotFound) is inappropriate - # response for a POST request to create a Cluster - e.code = 400 # BadRequest - raise - elif value == wtypes.Unset: - self._cluster_template_id = wtypes.Unset - - uuid = types.uuid - """Unique UUID for this cluster""" - - name = wtypes.StringType(min_length=1, max_length=242, - pattern='^[a-zA-Z][a-zA-Z0-9_.-]*$') - """Name of this cluster, max length is limited to 242 because of heat - stack requires max length limit to 255, and Magnum amend a uuid length""" - - cluster_template_id = wsme.wsproperty(wtypes.text, - _get_cluster_template_id, - _set_cluster_template_id, - mandatory=True) - """The cluster_template UUID""" - - keypair = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), - default=None) - """The name of the nova ssh keypair""" - - node_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) - """The node count for this cluster. Default to 1 if not set""" - - master_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) - """The number of master nodes for this cluster. 
Default to 1 if not set""" - - docker_volume_size = wtypes.IntegerType(minimum=1) - """The size in GB of the docker volume""" - - create_timeout = wsme.wsattr(wtypes.IntegerType(minimum=0), default=60) - """Timeout for creating the cluster in minutes. Default to 60 if not set""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated cluster links""" - - stack_id = wsme.wsattr(wtypes.text, readonly=True) - """Stack id of the heat stack""" - - status = wtypes.Enum(str, *fields.ClusterStatus.ALL) - """Status of the cluster from the heat stack""" - - status_reason = wtypes.text - """Status reason of the cluster from the heat stack""" - - discovery_url = wtypes.text - """Url used for cluster node discovery""" - - api_address = wsme.wsattr(wtypes.text, readonly=True) - """Api address of cluster master node""" - - coe_version = wsme.wsattr(wtypes.text, readonly=True) - """Version of the COE software currently running in this cluster. - Example: swarm version or kubernetes version.""" - - container_version = wsme.wsattr(wtypes.text, readonly=True) - """Version of the container software. Example: docker version.""" - - node_addresses = wsme.wsattr([wtypes.text], readonly=True) - """IP addresses of cluster slave nodes""" - - master_addresses = wsme.wsattr([wtypes.text], readonly=True) - """IP addresses of cluster master nodes""" - - faults = wsme.wsattr(wtypes.DictType(str, wtypes.text)) - """Fault info collected from the heat resources of this cluster""" - - def __init__(self, **kwargs): - super(Cluster, self).__init__() - self.fields = [] - for field in objects.Cluster.fields: - # Skip fields we do not expose. - if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - @staticmethod - def _convert_with_links(cluster, url, expand=True): - if not expand: - cluster.unset_fields_except(['uuid', 'name', 'cluster_template_id', - 'keypair', 'docker_volume_size', - 'node_count', 'status', - 'create_timeout', 'master_count', - 'stack_id']) - - cluster.links = [link.Link.make_link('self', url, - 'clusters', cluster.uuid), - link.Link.make_link('bookmark', url, - 'clusters', cluster.uuid, - bookmark=True)] - return cluster - - @classmethod - def convert_with_links(cls, rpc_cluster, expand=True): - cluster = Cluster(**rpc_cluster.as_dict()) - return cls._convert_with_links(cluster, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - temp_id = '4a96ac4b-2447-43f1-8ca6-9fd6f36d146d' - sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - name='example', - cluster_template_id=temp_id, - keypair=None, - node_count=2, - master_count=1, - docker_volume_size=1, - create_timeout=15, - stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63', - status=fields.ClusterStatus.CREATE_COMPLETE, - status_reason="CREATE completed successfully", - api_address='172.24.4.3', - node_addresses=['172.24.4.4', '172.24.4.5'], - created_at=timeutils.utcnow(), - updated_at=timeutils.utcnow(), - coe_version=None, - container_version=None) - return cls._convert_with_links(sample, 'http://localhost:9511', expand) - - -class ClusterPatchType(types.JsonPatchType): - _api_base = Cluster - - @staticmethod - def internal_attrs(): - internal_attrs = ['/api_address', '/node_addresses', - '/master_addresses', '/stack_id', - '/ca_cert_ref', '/magnum_cert_ref', - '/trust_id', '/trustee_user_name', - '/trustee_password', '/trustee_user_id'] - return types.JsonPatchType.internal_attrs() + internal_attrs - - 
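The internal_attrs() list in ClusterPatchType above is what keeps clients from patching server-managed fields such as the Heat stack id and the trustee credentials. A small illustration of that screening; the attribute values are copied from the code above plus the base JsonPatchType defaults (shown later in types.py), and the check is a simplification of the real validate():

    # Internal attributes from ClusterPatchType.internal_attrs() above,
    # combined with the base JsonPatchType list.
    internal_attrs = ['/api_address', '/node_addresses', '/master_addresses',
                      '/stack_id', '/ca_cert_ref', '/magnum_cert_ref',
                      '/trust_id', '/trustee_user_name', '/trustee_password',
                      '/trustee_user_id', '/created_at', '/id', '/links',
                      '/updated_at', '/uuid', '/project_id', '/user_id']

    # A patch a client might legitimately send to scale a cluster.
    patch_body = [{'op': 'replace', 'path': '/node_count', 'value': 3}]

    for op in patch_body:
        if op['path'] in internal_attrs:
            # The real validator raises wsme.exc.ClientSideError here.
            raise ValueError("'%s' is an internal attribute and can not "
                             "be updated" % op['path'])
    print('patch accepted')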
-class ClusterCollection(collection.Collection): - """API representation of a collection of clusters.""" - - clusters = [Cluster] - """A list containing cluster objects""" - - def __init__(self, **kwargs): - self._type = 'clusters' - - @staticmethod - def convert_with_links(rpc_clusters, limit, url=None, expand=False, - **kwargs): - collection = ClusterCollection() - collection.clusters = [Cluster.convert_with_links(p, expand) - for p in rpc_clusters] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - @classmethod - def sample(cls): - sample = cls() - sample.clusters = [Cluster.sample(expand=False)] - return sample - - -class ClustersController(base.Controller): - """REST controller for Clusters.""" - - def __init__(self): - super(ClustersController, self).__init__() - - _custom_actions = { - 'detail': ['GET'], - } - - def _generate_name_for_cluster(self, context): - """Generate a random name like: zeta-22-cluster.""" - name_gen = name_generator.NameGenerator() - name = name_gen.generate() - return name + '-cluster' - - def _get_clusters_collection(self, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Cluster.get_by_uuid(pecan.request.context, - marker) - - clusters = objects.Cluster.list(pecan.request.context, limit, - marker_obj, sort_key=sort_key, - sort_dir=sort_dir) - - return ClusterCollection.convert_with_links(clusters, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @expose.expose(ClusterCollection, types.uuid, int, wtypes.text, - wtypes.text) - def get_all(self, marker=None, limit=None, sort_key='id', - sort_dir='asc'): - """Retrieve a list of clusters. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'cluster:get_all', - action='cluster:get_all') - return self._get_clusters_collection(marker, limit, sort_key, - sort_dir) - - @expose.expose(ClusterCollection, types.uuid, int, wtypes.text, - wtypes.text) - def detail(self, marker=None, limit=None, sort_key='id', - sort_dir='asc'): - """Retrieve a list of clusters with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'cluster:detail', - action='cluster:detail') - - # NOTE(lucasagomes): /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "clusters": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['clusters', 'detail']) - return self._get_clusters_collection(marker, limit, - sort_key, sort_dir, expand, - resource_url) - - def _collect_fault_info(self, context, cluster): - """Collect fault info from heat resources of given cluster - - and store them into cluster.faults. 
- """ - osc = clients.OpenStackClients(context) - filters = {'status': 'FAILED'} - try: - failed_resources = osc.heat().resources.list( - cluster.stack_id, nested_depth=2, filters=filters) - except Exception as e: - failed_resources = [] - LOG.warning("Failed to retrieve failed resources for " - "cluster %(cluster)s from Heat stack " - "%(stack)s due to error: %(e)s", - {'cluster': cluster.uuid, - 'stack': cluster.stack_id, 'e': e}, - exc_info=True) - - return {res.resource_name: res.resource_status_reason - for res in failed_resources} - - @expose.expose(Cluster, types.uuid_or_name) - def get_one(self, cluster_ident): - """Retrieve information about the given Cluster. - - :param cluster_ident: UUID or logical name of the Cluster. - """ - context = pecan.request.context - cluster = api_utils.get_resource('Cluster', cluster_ident) - policy.enforce(context, 'cluster:get', cluster.as_dict(), - action='cluster:get') - - cluster = Cluster.convert_with_links(cluster) - - if cluster.status in fields.ClusterStatus.STATUS_FAILED: - cluster.faults = self._collect_fault_info(context, cluster) - - return cluster - - def _check_cluster_quota_limit(self, context): - try: - # Check if there is any explicit quota limit set in Quotas table - quota = objects.Quota.get_quota_by_project_id_resource( - context, - context.project_id, - 'Cluster') - cluster_limit = quota.hard_limit - except exception.QuotaNotFound: - # If explicit quota was not set for the project, use default limit - cluster_limit = CONF.quotas.max_clusters_per_project - - if objects.Cluster.get_count_all(context) >= cluster_limit: - msg = _("You have reached the maximum clusters per project, " - "%d. You may delete a cluster to make room for a new " - "one.") % cluster_limit - raise exception.ResourceLimitExceeded(msg=msg) - - @expose.expose(ClusterID, body=Cluster, status_code=202) - @validation.enforce_cluster_type_supported() - @validation.enforce_cluster_volume_storage_size() - def post(self, cluster): - """Create a new cluster. - - :param cluster: a cluster within the request body. - """ - context = pecan.request.context - policy.enforce(context, 'cluster:create', - action='cluster:create') - - self._check_cluster_quota_limit(context) - - temp_id = cluster.cluster_template_id - cluster_template = objects.ClusterTemplate.get_by_uuid(context, - temp_id) - # If keypair not present, use cluster_template value - if cluster.keypair is None: - cluster.keypair = cluster_template.keypair_id - - # If docker_volume_size is not present, use cluster_template value - if cluster.docker_volume_size == wtypes.Unset: - cluster.docker_volume_size = cluster_template.docker_volume_size - - cluster_dict = cluster.as_dict() - - attr_validator.validate_os_resources(context, - cluster_template.as_dict(), - cluster_dict) - attr_validator.validate_master_count(cluster_dict, - cluster_template.as_dict()) - - cluster_dict['project_id'] = context.project_id - cluster_dict['user_id'] = context.user_id - # NOTE(yuywz): We will generate a random human-readable name for - # cluster if the name is not specified by user. 
- name = cluster_dict.get('name') or \ - self._generate_name_for_cluster(context) - cluster_dict['name'] = name - cluster_dict['coe_version'] = None - cluster_dict['container_version'] = None - - new_cluster = objects.Cluster(context, **cluster_dict) - new_cluster.uuid = uuid.uuid4() - pecan.request.rpcapi.cluster_create_async(new_cluster, - cluster.create_timeout) - - return ClusterID(new_cluster.uuid) - - @base.Controller.api_version("1.1", "1.2") - @wsme.validate(types.uuid, [ClusterPatchType]) - @expose.expose(ClusterID, types.uuid_or_name, body=[ClusterPatchType], - status_code=202) - def patch(self, cluster_ident, patch): - """Update an existing Cluster. - - :param cluster_ident: UUID or logical name of a cluster. - :param patch: a json PATCH document to apply to this cluster. - """ - cluster = self._patch(cluster_ident, patch) - pecan.request.rpcapi.cluster_update_async(cluster) - return ClusterID(cluster.uuid) - - @base.Controller.api_version("1.3") # noqa - @wsme.validate(types.uuid, bool, [ClusterPatchType]) - @expose.expose(ClusterID, types.uuid_or_name, types.boolean, - body=[ClusterPatchType], status_code=202) - def patch(self, cluster_ident, rollback=False, patch=None): - """Update an existing Cluster. - - :param cluster_ident: UUID or logical name of a cluster. - :param rollback: whether to rollback cluster on update failure. - :param patch: a json PATCH document to apply to this cluster. - """ - cluster = self._patch(cluster_ident, patch) - pecan.request.rpcapi.cluster_update_async(cluster, rollback) - return ClusterID(cluster.uuid) - - def _patch(self, cluster_ident, patch): - context = pecan.request.context - cluster = api_utils.get_resource('Cluster', cluster_ident) - policy.enforce(context, 'cluster:update', cluster.as_dict(), - action='cluster:update') - try: - cluster_dict = cluster.as_dict() - new_cluster = Cluster(**api_utils.apply_jsonpatch(cluster_dict, - patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - for field in objects.Cluster.fields: - try: - patch_val = getattr(new_cluster, field) - except AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if cluster[field] != patch_val: - cluster[field] = patch_val - - delta = cluster.obj_what_changed() - - validation.validate_cluster_properties(delta) - return cluster - - @expose.expose(None, types.uuid_or_name, status_code=204) - def delete(self, cluster_ident): - """Delete a cluster. - - :param cluster_ident: UUID of cluster or logical name of the cluster. - """ - context = pecan.request.context - cluster = api_utils.get_resource('Cluster', cluster_ident) - policy.enforce(context, 'cluster:delete', cluster.as_dict(), - action='cluster:delete') - - pecan.request.rpcapi.cluster_delete_async(cluster.uuid) diff --git a/magnum/api/controllers/v1/cluster_template.py b/magnum/api/controllers/v1/cluster_template.py deleted file mode 100644 index cc95bfcf..00000000 --- a/magnum/api/controllers/v1/cluster_template.py +++ /dev/null @@ -1,435 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import timeutils -import pecan -import wsme -from wsme import types as wtypes - -from magnum.api import attr_validator -from magnum.api.controllers import base -from magnum.api.controllers import link -from magnum.api.controllers.v1 import collection -from magnum.api.controllers.v1 import types -from magnum.api import expose -from magnum.api import utils as api_utils -from magnum.api import validation -from magnum.common import clients -from magnum.common import exception -from magnum.common import name_generator -from magnum.common import policy -from magnum import objects -from magnum.objects import fields - - -class ClusterTemplate(base.APIBase): - """API representation of a ClusterTemplate. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of - a ClusterTemplate. - """ - - uuid = types.uuid - """Unique UUID for this ClusterTemplate""" - - name = wtypes.StringType(min_length=1, max_length=255) - """The name of the ClusterTemplate""" - - coe = wtypes.Enum(str, *fields.ClusterType.ALL, mandatory=True) - """The Container Orchestration Engine for this clustertemplate""" - - image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), - mandatory=True) - """The image name or UUID to use as an image for this ClusterTemplate""" - - flavor_id = wtypes.StringType(min_length=1, max_length=255) - """The flavor of this ClusterTemplate""" - - master_flavor_id = wtypes.StringType(min_length=1, max_length=255) - """The flavor of the master node for this ClusterTemplate""" - - dns_nameserver = wtypes.IPv4AddressType() - """The DNS nameserver address""" - - keypair_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), - default=None) - """The name of the nova ssh keypair""" - - external_network_id = wtypes.StringType(min_length=1, max_length=255) - """The external network to attach to the Cluster""" - - fixed_network = wtypes.StringType(min_length=1, max_length=255) - """The fixed network name to attach to the Cluster""" - - fixed_subnet = wtypes.StringType(min_length=1, max_length=255) - """The fixed subnet name to attach to the Cluster""" - - network_driver = wtypes.StringType(min_length=1, max_length=255) - """The name of the driver used for instantiating container networks""" - - apiserver_port = wtypes.IntegerType(minimum=1024, maximum=65535) - """The API server port for k8s""" - - docker_volume_size = wtypes.IntegerType(minimum=1) - """The size in GB of the docker volume""" - - cluster_distro = wtypes.StringType(min_length=1, max_length=255) - """The Cluster distro for the Cluster, e.g. coreos, fedora-atomic, etc.""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated ClusterTemplate links""" - - http_proxy = wtypes.StringType(min_length=1, max_length=255) - """Address of a proxy that will receive all HTTP requests and relay them. - The format is a URL including a port number. 
- """ - - https_proxy = wtypes.StringType(min_length=1, max_length=255) - """Address of a proxy that will receive all HTTPS requests and relay them. - The format is a URL including a port number. - """ - - no_proxy = wtypes.StringType(min_length=1, max_length=255) - """A comma separated list of IPs for which proxies should not be - used in the cluster - """ - - volume_driver = wtypes.StringType(min_length=1, max_length=255) - """The name of the driver used for instantiating container volumes""" - - registry_enabled = wsme.wsattr(types.boolean, default=False) - """Indicates whether the docker registry is enabled""" - - labels = wtypes.DictType(str, str) - """One or more key/value pairs""" - - tls_disabled = wsme.wsattr(types.boolean, default=False) - """Indicates whether the TLS should be disabled""" - - public = wsme.wsattr(types.boolean, default=False) - """Indicates whether the ClusterTemplate is public or not.""" - - server_type = wsme.wsattr(wtypes.Enum(str, *fields.ServerType.ALL), - default='vm') - """Server type for this ClusterTemplate """ - - insecure_registry = wtypes.StringType(min_length=1, max_length=255) - """Insecure registry URL when creating a ClusterTemplate """ - - docker_storage_driver = wtypes.Enum(str, *fields.DockerStorageDriver.ALL) - """Docker storage driver""" - - master_lb_enabled = wsme.wsattr(types.boolean, default=False) - """Indicates whether created clusters should have a load balancer for master - nodes or not. - """ - - floating_ip_enabled = wsme.wsattr(types.boolean, default=True) - """Indicates whether created clusters should have a floating ip or not.""" - - def __init__(self, **kwargs): - self.fields = [] - for field in objects.ClusterTemplate.fields: - # Skip fields we do not expose. - if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - @staticmethod - def _convert_with_links(cluster_template, url): - cluster_template.links = [link.Link.make_link('self', url, - 'clustertemplates', - cluster_template.uuid), - link.Link.make_link('bookmark', url, - 'clustertemplates', - cluster_template.uuid, - bookmark=True)] - return cluster_template - - @classmethod - def convert_with_links(cls, rpc_cluster_template): - cluster_template = ClusterTemplate(**rpc_cluster_template.as_dict()) - return cls._convert_with_links(cluster_template, - pecan.request.host_url) - - @classmethod - def sample(cls): - sample = cls( - uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - name='example', - image_id='Fedora-k8s', - flavor_id='m1.small', - master_flavor_id='m1.small', - dns_nameserver='8.8.1.1', - keypair_id='keypair1', - external_network_id='ffc44e4a-2319-4062-bce0-9ae1c38b05ba', - fixed_network='private', - fixed_subnet='private-subnet', - network_driver='libnetwork', - volume_driver='cinder', - apiserver_port=8080, - docker_volume_size=25, - docker_storage_driver='devicemapper', - cluster_distro='fedora-atomic', - coe=fields.ClusterType.KUBERNETES, - http_proxy='http://proxy.com:123', - https_proxy='https://proxy.com:123', - no_proxy='192.168.0.1,192.168.0.2,192.168.0.3', - labels={'key1': 'val1', 'key2': 'val2'}, - server_type='vm', - insecure_registry='10.238.100.100:5000', - created_at=timeutils.utcnow(), - updated_at=timeutils.utcnow(), - public=False, - master_lb_enabled=False, - floating_ip_enabled=True) - return cls._convert_with_links(sample, 'http://localhost:9511') - - -class ClusterTemplatePatchType(types.JsonPatchType): - _api_base = ClusterTemplate - _extra_non_removable_attrs = 
{'/network_driver', '/external_network_id', - '/tls_disabled', '/public', '/server_type', - '/coe', '/registry_enabled', - '/cluster_distro'} - - -class ClusterTemplateCollection(collection.Collection): - """API representation of a collection of ClusterTemplates.""" - - clustertemplates = [ClusterTemplate] - """A list containing ClusterTemplates objects""" - - def __init__(self, **kwargs): - self._type = 'clustertemplates' - - @staticmethod - def convert_with_links(rpc_cluster_templates, limit, url=None, **kwargs): - collection = ClusterTemplateCollection() - collection.clustertemplates = [ClusterTemplate.convert_with_links(p) - for p in rpc_cluster_templates] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - @classmethod - def sample(cls): - sample = cls() - sample.clustertemplates = [ClusterTemplate.sample()] - return sample - - -class ClusterTemplatesController(base.Controller): - """REST controller for ClusterTemplates.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _generate_name_for_cluster_template(self, context): - """Generate a random name like: zeta-22-model.""" - - name_gen = name_generator.NameGenerator() - name = name_gen.generate() - return name + '-template' - - def _get_cluster_templates_collection(self, marker, limit, - sort_key, sort_dir, - resource_url=None): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.ClusterTemplate.get_by_uuid( - pecan.request.context, marker) - - cluster_templates = objects.ClusterTemplate.list( - pecan.request.context, limit, marker_obj, sort_key=sort_key, - sort_dir=sort_dir) - - return ClusterTemplateCollection.convert_with_links(cluster_templates, - limit, - url=resource_url, - sort_key=sort_key, - sort_dir=sort_dir) - - @expose.expose(ClusterTemplateCollection, types.uuid, int, wtypes.text, - wtypes.text) - def get_all(self, marker=None, limit=None, sort_key='id', - sort_dir='asc'): - """Retrieve a list of ClusterTemplates. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'clustertemplate:get_all', - action='clustertemplate:get_all') - return self._get_cluster_templates_collection(marker, limit, sort_key, - sort_dir) - - @expose.expose(ClusterTemplateCollection, types.uuid, int, wtypes.text, - wtypes.text) - def detail(self, marker=None, limit=None, sort_key='id', - sort_dir='asc'): - """Retrieve a list of ClusterTemplates with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
- """ - context = pecan.request.context - policy.enforce(context, 'clustertemplate:detail', - action='clustertemplate:detail') - - # NOTE(lucasagomes): /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "clustertemplates": - raise exception.HTTPNotFound - - resource_url = '/'.join(['clustertemplates', 'detail']) - return self._get_cluster_templates_collection(marker, limit, - sort_key, sort_dir, - resource_url) - - @expose.expose(ClusterTemplate, types.uuid_or_name) - def get_one(self, cluster_template_ident): - """Retrieve information about the given ClusterTemplate. - - :param cluster_template_ident: UUID or logical name of a - ClusterTemplate. - """ - context = pecan.request.context - cluster_template = api_utils.get_resource('ClusterTemplate', - cluster_template_ident) - if not cluster_template.public: - policy.enforce(context, 'clustertemplate:get', - cluster_template.as_dict(), - action='clustertemplate:get') - - return ClusterTemplate.convert_with_links(cluster_template) - - @expose.expose(ClusterTemplate, body=ClusterTemplate, status_code=201) - @validation.enforce_server_type() - @validation.enforce_network_driver_types_create() - @validation.enforce_volume_driver_types_create() - @validation.enforce_volume_storage_size_create() - def post(self, cluster_template): - """Create a new ClusterTemplate. - - :param cluster_template: a ClusterTemplate within the request body. - """ - context = pecan.request.context - policy.enforce(context, 'clustertemplate:create', - action='clustertemplate:create') - cluster_template_dict = cluster_template.as_dict() - cli = clients.OpenStackClients(context) - attr_validator.validate_os_resources(context, cluster_template_dict) - image_data = attr_validator.validate_image(cli, - cluster_template_dict[ - 'image_id']) - cluster_template_dict['cluster_distro'] = image_data['os_distro'] - cluster_template_dict['project_id'] = context.project_id - cluster_template_dict['user_id'] = context.user_id - # check permissions for making cluster_template public - if cluster_template_dict['public']: - if not policy.enforce(context, "clustertemplate:publish", None, - do_raise=False): - raise exception.ClusterTemplatePublishDenied() - - # NOTE(yuywz): We will generate a random human-readable name for - # cluster_template if the name is not specified by user. - arg_name = cluster_template_dict.get('name') - name = arg_name or self._generate_name_for_cluster_template(context) - cluster_template_dict['name'] = name - - new_cluster_template = objects.ClusterTemplate(context, - **cluster_template_dict) - new_cluster_template.create() - # Set the HTTP Location Header - pecan.response.location = link.build_url('clustertemplates', - new_cluster_template.uuid) - return ClusterTemplate.convert_with_links(new_cluster_template) - - @wsme.validate(types.uuid_or_name, [ClusterTemplatePatchType]) - @expose.expose(ClusterTemplate, types.uuid_or_name, - body=[ClusterTemplatePatchType]) - @validation.enforce_network_driver_types_update() - @validation.enforce_volume_driver_types_update() - def patch(self, cluster_template_ident, patch): - """Update an existing ClusterTemplate. - - :param cluster_template_ident: UUID or logic name of a - ClusterTemplate. - :param patch: a json PATCH document to apply to this - ClusterTemplate. 
- """ - context = pecan.request.context - cluster_template = api_utils.get_resource('ClusterTemplate', - cluster_template_ident) - policy.enforce(context, 'clustertemplate:update', - cluster_template.as_dict(), - action='clustertemplate:update') - try: - cluster_template_dict = cluster_template.as_dict() - new_cluster_template = ClusterTemplate(**api_utils.apply_jsonpatch( - cluster_template_dict, - patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - new_cluster_template_dict = new_cluster_template.as_dict() - attr_validator.validate_os_resources(context, - new_cluster_template_dict) - # check permissions when updating ClusterTemplate public flag - if cluster_template.public != new_cluster_template.public: - if not policy.enforce(context, "clustertemplate:publish", None, - do_raise=False): - raise exception.ClusterTemplatePublishDenied() - - # Update only the fields that have changed - for field in objects.ClusterTemplate.fields: - try: - patch_val = getattr(new_cluster_template, field) - except AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if cluster_template[field] != patch_val: - cluster_template[field] = patch_val - - cluster_template.save() - return ClusterTemplate.convert_with_links(cluster_template) - - @expose.expose(None, types.uuid_or_name, status_code=204) - def delete(self, cluster_template_ident): - """Delete a ClusterTemplate. - - :param cluster_template_ident: UUID or logical name of a - ClusterTemplate. - """ - context = pecan.request.context - cluster_template = api_utils.get_resource('ClusterTemplate', - cluster_template_ident) - policy.enforce(context, 'clustertemplate:delete', - cluster_template.as_dict(), - action='clustertemplate:delete') - cluster_template.destroy() diff --git a/magnum/api/controllers/v1/collection.py b/magnum/api/controllers/v1/collection.py deleted file mode 100644 index c17251e5..00000000 --- a/magnum/api/controllers/v1/collection.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import pecan -from wsme import types as wtypes - -from magnum.api.controllers import base -from magnum.api.controllers import link - - -class Collection(base.APIBase): - - next = wtypes.text - """A link to retrieve the next subset of the collection""" - - @property - def collection(self): - return getattr(self, self._type) - - def has_next(self, limit): - """Return whether collection has more items.""" - return len(self.collection) and len(self.collection) == limit - - def get_next(self, limit, url=None, marker_attribute='uuid', **kwargs): - """Return a link to the next subset of the collection.""" - if not self.has_next(limit): - return wtypes.Unset - - resource_url = url or self._type - q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs]) - next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % { - 'args': q_args, 'limit': limit, - 'marker': getattr(self.collection[-1], marker_attribute)} - - return link.Link.make_link('next', pecan.request.host_url, - resource_url, next_args).href diff --git a/magnum/api/controllers/v1/magnum_services.py b/magnum/api/controllers/v1/magnum_services.py deleted file mode 100644 index 90db8cd0..00000000 --- a/magnum/api/controllers/v1/magnum_services.py +++ /dev/null @@ -1,102 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
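The Collection.get_next() helper in collection.py above is the basis of list pagination across all of these controllers. A standalone sketch of the same computation, with the pecan request and the Link helper replaced by plain strings; the host and item list are illustrative values:

    def get_next(items, limit, resource_url, host, marker_attribute='uuid',
                 **kwargs):
        # A next link exists only when the current page is full.
        if not (items and len(items) == limit):
            return None
        q_args = ''.join('%s=%s&' % (k, v) for k, v in kwargs.items())
        return '%s/%s?%slimit=%d&marker=%s' % (
            host, resource_url, q_args, limit,
            getattr(items[-1], marker_attribute))

    class Item(object):
        def __init__(self, uuid):
            self.uuid = uuid

    page = [Item('aaa'), Item('bbb')]
    print(get_next(page, 2, 'clusters', 'http://localhost:9511',
                   sort_key='id', sort_dir='asc'))
    # http://localhost:9511/clusters?sort_key=id&sort_dir=asc&limit=2&marker=bbb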
- -import pecan -import wsme -from wsme import types as wtypes - -from magnum.api.controllers import base -from magnum.api.controllers.v1 import collection -from magnum.api import expose -from magnum.api import servicegroup as svcgrp_api -from magnum.common import policy -from magnum import objects -from magnum.objects import fields - - -class MagnumService(base.APIBase): - - host = wtypes.StringType(min_length=1, max_length=255) - """Name of the host """ - - binary = wtypes.Enum(str, *fields.MagnumServiceBinary.ALL) - """Name of the binary""" - - state = wtypes.Enum(str, *fields.MagnumServiceState.ALL) - """State of the binary""" - - id = wsme.wsattr(wtypes.IntegerType(minimum=1)) - """The id for the healthcheck record """ - - report_count = wsme.wsattr(wtypes.IntegerType(minimum=0)) - """The number of times the heartbeat was reported """ - - # disabled = wsme.wsattr(wtypes.BoolType(default=False)) - """If the service is 'disabled' administratively """ - - disabled_reason = wtypes.StringType(min_length=0, max_length=255) - """Reason for disabling """ - - def __init__(self, state, **kwargs): - super(MagnumService, self).__init__() - - self.fields = ['state'] - setattr(self, 'state', state) - for field in objects.MagnumService.fields: - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - -class MagnumServiceCollection(collection.Collection): - - mservices = [MagnumService] - """A list containing service objects""" - - def __init__(self, **kwargs): - super(MagnumServiceCollection, self).__init__() - self._type = 'mservices' - - @staticmethod - def convert_db_rec_list_to_collection(servicegroup_api, - rpc_msvcs, **kwargs): - collection = MagnumServiceCollection() - collection.mservices = [] - for p in rpc_msvcs: - alive = servicegroup_api.service_is_up(p) - state = 'up' if alive else 'down' - msvc = MagnumService(state, **p.as_dict()) - collection.mservices.append(msvc) - collection.next = collection.get_next(limit=None, url=None, **kwargs) - return collection - - -class MagnumServiceController(base.Controller): - """REST controller for magnum-services.""" - - def __init__(self, **kwargs): - super(MagnumServiceController, self).__init__() - self.servicegroup_api = svcgrp_api.ServiceGroup() - - @expose.expose(MagnumServiceCollection) - @policy.enforce_wsgi("magnum-service") - def get_all(self): - """Retrieve a list of magnum-services. - - """ - msvcs = objects.MagnumService.list(pecan.request.context, - limit=None, - marker=None, - sort_key='id', - sort_dir='asc') - - return MagnumServiceCollection.convert_db_rec_list_to_collection( - self.servicegroup_api, msvcs) diff --git a/magnum/api/controllers/v1/quota.py b/magnum/api/controllers/v1/quota.py deleted file mode 100644 index 5a2b0eb9..00000000 --- a/magnum/api/controllers/v1/quota.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
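MagnumServiceCollection above derives each service's 'up'/'down' state from the servicegroup API at read time rather than storing it. A sketch of that heartbeat rule under assumed values; the 60-second window and the record shape are illustrative, not magnum's actual configuration:

    import datetime

    def service_is_up(last_heartbeat, max_interval=60):
        # Assumed rule: alive if the last report is within the window.
        age = datetime.datetime.utcnow() - last_heartbeat
        return age.total_seconds() <= max_interval

    now = datetime.datetime.utcnow()
    records = [('magnum-conductor', now),
               ('magnum-conductor', now - datetime.timedelta(minutes=10))]
    for binary, beat in records:
        state = 'up' if service_is_up(beat) else 'down'
        print(binary, state)  # first record is 'up', second is 'down'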
- -import pecan -import wsme -from wsme import types as wtypes - -from magnum.api.controllers import base -from magnum.api.controllers.v1 import collection -from magnum.api.controllers.v1 import types -from magnum.api import expose -from magnum.api import utils as api_utils -from magnum.api import validation -from magnum.common import exception -from magnum.common import policy -from magnum.i18n import _ -from magnum import objects -from magnum.objects import fields - - -class Quota(base.APIBase): - """API representation of a project Quota. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of Quota. - """ - id = wsme.wsattr(wtypes.IntegerType(minimum=1)) - """unique id""" - - hard_limit = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) - """The hard limit for total number of clusters. Default to 1 if not set""" - - project_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255), - default=None) - """The project id""" - - resource = wsme.wsattr(wtypes.Enum(str, *fields.QuotaResourceName.ALL), - default='Cluster') - """The resource name""" - - def __init__(self, **kwargs): - super(Quota, self).__init__() - self.fields = [] - for field in objects.Quota.fields: - # Skip fields we do not expose. - if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - @classmethod - def convert(cls, quota): - return Quota(**quota.as_dict()) - - -class QuotaCollection(collection.Collection): - """API representation of a collection of quotas.""" - - quotas = [Quota] - """A list containing quota objects""" - - def __init__(self, **kwargs): - self._type = 'quotas' - - @staticmethod - def convert(quotas, limit, **kwargs): - collection = QuotaCollection() - collection.quotas = [Quota.convert(p) for p in quotas] - collection.next = collection.get_next(limit, - marker_attribute='id', - **kwargs) - return collection - - -class QuotaController(base.Controller): - """REST controller for Quotas.""" - - def __init__(self): - super(QuotaController, self).__init__() - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_quota_collection(self, marker, limit, sort_key, sort_dir, - filters): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Quota.get_by_id(pecan.request.context, - marker) - - quotas = objects.Quota.list(pecan.request.context, - limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir, - filters=filters) - - return QuotaCollection.convert(quotas, - limit, - sort_key=sort_key, - sort_dir=sort_dir) - - @expose.expose(QuotaCollection, int, int, wtypes.text, wtypes.text, - types.boolean) - def get_all(self, marker=None, limit=None, sort_key='id', - sort_dir='asc', all_tenants=False): - """Retrieve a list of quotas. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param all_tenants: a flag to indicate all or current tenant. 
- """ - context = pecan.request.context - policy.enforce(context, 'quota:get_all', - action='quota:get_all') - - filters = {} - if not context.is_admin or not all_tenants: - filters = {"project_id": context.project_id} - - return self._get_quota_collection(marker, - limit, - sort_key, - sort_dir, - filters) - - @expose.expose(Quota, wtypes.text, wtypes.text) - def get_one(self, project_id, resource): - """Retrieve Quota information for the given project_id. - - :param id: project id. - :param resource: resource name. - """ - context = pecan.request.context - policy.enforce(context, 'quota:get', action='quota:get') - - if not context.is_admin and project_id != context.project_id: - raise exception.NotAuthorized() - - quota = objects.Quota.get_quota_by_project_id_resource(context, - project_id, - resource) - return Quota.convert(quota) - - @expose.expose(Quota, body=Quota, status_code=201) - @validation.enforce_valid_project_id_on_create() - def post(self, quota): - """Create Quota. - - :param quota: a json document to create this Quota. - """ - - context = pecan.request.context - policy.enforce(context, 'quota:create', action='quota:create') - - quota_dict = quota.as_dict() - if 'project_id'not in quota_dict or not quota_dict['project_id']: - msg = _('Must provide a valid project ID.') - raise exception.InvalidParameterValue(message=msg) - - new_quota = objects.Quota(context, **quota_dict) - new_quota.create() - return Quota.convert(new_quota) - - @expose.expose(Quota, wtypes.text, wtypes.text, body=Quota, - status_code=202) - def patch(self, project_id, resource, quotapatch): - """Update Quota for a given project_id. - - :param project_id: project id. - :param resource: resource name. - :param quotapatch: a json document to update Quota. - """ - - context = pecan.request.context - policy.enforce(context, 'quota:update', action='quota:update') - quota_dict = quotapatch.as_dict() - quota_dict['project_id'] = project_id - quota_dict['resource'] = resource - db_quota = objects.Quota.update_quota(context, project_id, quota_dict) - return Quota.convert(db_quota) - - @expose.expose(None, wtypes.text, wtypes.text, status_code=204) - def delete(self, project_id, resource): - """Delete Quota for a given project_id and resource. - - :param project_id: project id. - :param resource: resource name. - """ - - context = pecan.request.context - policy.enforce(context, 'quota:delete', action='quota:delete') - quota_dict = {"project_id": project_id, "resource": resource} - quota = objects.Quota(context, **quota_dict) - quota.delete() diff --git a/magnum/api/controllers/v1/stats.py b/magnum/api/controllers/v1/stats.py deleted file mode 100644 index 9461ffef..00000000 --- a/magnum/api/controllers/v1/stats.py +++ /dev/null @@ -1,72 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import pecan -from wsme import types as wtypes - -from magnum.api.controllers import base -from magnum.api import expose -from magnum.common import exception -from magnum.common import policy -from magnum.i18n import _ -from magnum import objects - - -class Stats(base.APIBase): - - clusters = wtypes.IntegerType(minimum=0) - nodes = wtypes.IntegerType(minimum=0) - - def __init__(self, **kwargs): - self.fields = [] - for field in objects.Stats.fields: - # Skip fields we do not expose. - if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - @classmethod - def convert(cls, rpc_stats): - return Stats(**rpc_stats.as_dict()) - - -class StatsController(base.Controller): - """REST controller for Stats.""" - def __init__(self, **kwargs): - super(StatsController, self).__init__() - - @expose.expose(Stats, wtypes.text, wtypes.text) - def get_all(self, project_id=None, type="cluster"): - """Retrieve magnum stats. - - """ - context = pecan.request.context - policy.enforce(context, 'stats:get_all', action='stats:get_all') - allowed_stats = ["cluster"] - - if type.lower() not in allowed_stats: - msg = _("Invalid stats type. Allowed values are '%s'") - allowed_str = ','.join(allowed_stats) - raise exception.InvalidParameterValue(err=msg % allowed_str) - - # 1.If the requester is not an admin and trying to request stats for - # different tenant, then reject the request - # 2.If the requester is not an admin and project_id was not provided, - # then return self stats - if not context.is_admin: - project_id = project_id if project_id else context.project_id - if project_id != context.project_id: - raise exception.NotAuthorized() - - stats = objects.Stats.get_cluster_stats(context, project_id) - return Stats.convert(stats) diff --git a/magnum/api/controllers/v1/types.py b/magnum/api/controllers/v1/types.py deleted file mode 100644 index 220560b0..00000000 --- a/magnum/api/controllers/v1/types.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
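StatsController.get_all() above performs two checks before querying: the stats type must be in the allowed list, and a non-admin may only ask about its own project. A condensed sketch of that screening; the context fields and project ids are stand-ins:

    def screen_stats_request(is_admin, caller_project, project_id,
                             stats_type='cluster'):
        allowed_stats = ['cluster']
        if stats_type.lower() not in allowed_stats:
            raise ValueError("Invalid stats type. Allowed values are '%s'"
                             % ','.join(allowed_stats))
        if not is_admin:
            # Non-admins fall back to their own project and may not ask
            # about anyone else's.
            project_id = project_id or caller_project
            if project_id != caller_project:
                raise PermissionError('not authorized')
        return project_id

    print(screen_stats_request(False, 'p1', None))  # 'p1' (self stats)
    print(screen_stats_request(True, 'p1', 'p2'))   # 'p2' (admin may query)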
- -import inspect - -from oslo_utils import strutils -from oslo_utils import uuidutils -import wsme -from wsme import types as wtypes - -from magnum.common import exception -from magnum.common import utils -from magnum.i18n import _ - - -class MacAddressType(wtypes.UserType): - """A simple MAC address type.""" - - basetype = wtypes.text - name = 'macaddress' - - @staticmethod - def validate(value): - return utils.validate_and_normalize_mac(value) - - @staticmethod - def frombasetype(value): - if value is None: - return None - return MacAddressType.validate(value) - - -class NameType(wtypes.UserType): - """A logical name type.""" - - basetype = wtypes.text - name = 'name' - - @staticmethod - def validate(value): - if not utils.is_name_safe(value): - raise exception.InvalidName(name=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return NameType.validate(value) - - -class UuidType(wtypes.UserType): - """A simple UUID type.""" - - basetype = wtypes.text - name = 'uuid' - - @staticmethod - def validate(value): - if not uuidutils.is_uuid_like(value): - raise exception.InvalidUUID(uuid=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return UuidType.validate(value) - - -class BooleanType(wtypes.UserType): - """A simple boolean type.""" - - basetype = wtypes.text - name = 'boolean' - - @staticmethod - def validate(value): - try: - return strutils.bool_from_string(value, strict=True) - except ValueError as e: - # raise Invalid to return 400 (BadRequest) in the API - raise exception.Invalid(e) - - @staticmethod - def frombasetype(value): - if value is None: - return None - return BooleanType.validate(value) - - -class MultiType(wtypes.UserType): - """A complex type that represents one or more types. - - Used for validating that a value is an instance of one of the types. - - :param types: Variable-length list of types. - - """ - basetype = wtypes.text - - def __init__(self, *types): - self.types = types - - def __str__(self): - return ' | '.join(map(str, self.types)) - - def validate(self, value): - for t in self.types: - try: - return wtypes.validate_value(t, value) - except (exception.InvalidUUID, ValueError): - pass - else: - raise ValueError( - _("Wrong type. Expected '%(type)s', got '%(value)s'") - % {'type': self.types, 'value': type(value)}) - - -macaddress = MacAddressType() -uuid = UuidType() -name = NameType() -uuid_or_name = MultiType(UuidType, NameType) -boolean = BooleanType() - - -class JsonPatchType(wtypes.Base): - """A complex type that represents a single json-patch operation.""" - - path = wtypes.wsattr(wtypes.StringType(pattern='^(/[\w-]+)+$'), - mandatory=True) - op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'), - mandatory=True) - value = MultiType(wtypes.text, int) - - # The class of the objects being patched. Override this in subclasses. - # Should probably be a subclass of magnum.api.controllers.base.APIBase. - _api_base = None - - # Attributes that are not required for construction, but which may not be - # removed if set. Override in subclasses if needed. - _extra_non_removable_attrs = set() - - # Set of non-removable attributes, calculated lazily. - _non_removable_attrs = None - - @staticmethod - def internal_attrs(): - """Returns a list of internal attributes. - - Internal attributes can't be added, replaced or removed. This - method may be overwritten by derived class. 
- - """ - return ['/created_at', '/id', '/links', '/updated_at', - '/uuid', '/project_id', '/user_id'] - - @classmethod - def non_removable_attrs(cls): - """Returns a set of names of attributes that may not be removed. - - Attributes whose 'mandatory' property is True are automatically added - to this set. To add additional attributes to the set, override the - field _extra_non_removable_attrs in subclasses, with a set of the form - {'/foo', '/bar'}. - """ - if cls._non_removable_attrs is None: - cls._non_removable_attrs = cls._extra_non_removable_attrs.copy() - if cls._api_base: - fields = inspect.getmembers(cls._api_base, - lambda a: not inspect.isroutine(a)) - for name, field in fields: - if getattr(field, 'mandatory', False): - cls._non_removable_attrs.add('/%s' % name) - return cls._non_removable_attrs - - @staticmethod - def validate(patch): - if patch.path in patch.internal_attrs(): - msg = _("'%s' is an internal attribute and can not be updated") - raise wsme.exc.ClientSideError(msg % patch.path) - - if patch.path in patch.non_removable_attrs() and patch.op == 'remove': - msg = _("'%s' is a mandatory attribute and can not be removed") - raise wsme.exc.ClientSideError(msg % patch.path) - - if patch.op != 'remove': - if not patch.value: - msg = _("'add' and 'replace' operations needs value") - raise wsme.exc.ClientSideError(msg) - - ret = {'path': patch.path, 'op': patch.op} - if patch.value: - ret['value'] = patch.value - return ret diff --git a/magnum/api/controllers/versions.py b/magnum/api/controllers/versions.py deleted file mode 100644 index 67caafb0..00000000 --- a/magnum/api/controllers/versions.py +++ /dev/null @@ -1,152 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from webob import exc - -from magnum.i18n import _ - -# NOTE(yuntong): v1.0 is reserved to indicate Kilo's API, but is not presently -# supported by the API service. All changes between Kilo and the -# point where we added microversioning are considered backwards- -# compatible, but are not specifically discoverable at this time. -# -# The v1.1 version indicates this "initial" version as being -# different from Kilo (v1.0), and includes the following changes: -# -# Add details of new api versions here: - -# -# For each newly added microversion change, update the API version history -# string below with a one or two line description. Also update -# rest_api_version_history.rst for extra information on microversion. 
-REST_API_VERSION_HISTORY = """REST API Version History: - - * 1.1 - Initial version - * 1.2 - Async bay operations support - * 1.3 - Add bay rollback support - * 1.4 - Add stats API - * 1.5 - Add cluster CA certificate rotation support - * 1.6 - Add quotas API -""" - -BASE_VER = '1.1' -CURRENT_MAX_VER = '1.6' - - -class Version(object): - """API Version object.""" - - string = 'OpenStack-API-Version' - """HTTP Header string carrying the requested version""" - - min_string = 'OpenStack-API-Minimum-Version' - """HTTP response header""" - - max_string = 'OpenStack-API-Maximum-Version' - """HTTP response header""" - - service_string = 'container-infra' - - def __init__(self, headers, default_version, latest_version, - from_string=None): - """Create an API Version object from the supplied headers. - - :param headers: webob headers - :param default_version: version to use if not specified in headers - :param latest_version: version to use if latest is requested - :param from_string: create the version from string not headers - :raises: webob.HTTPNotAcceptable - """ - if from_string: - (self.major, self.minor) = tuple(int(i) - for i in from_string.split('.')) - - else: - (self.major, self.minor) = Version.parse_headers(headers, - default_version, - latest_version) - - def __repr__(self): - return '%s.%s' % (self.major, self.minor) - - @staticmethod - def parse_headers(headers, default_version, latest_version): - """Determine the API version requested based on the headers supplied. - - :param headers: webob headers - :param default_version: version to use if not specified in headers - :param latest_version: version to use if latest is requested - :returns: a tuple of (major, minor) version numbers - :raises: webob.HTTPNotAcceptable - """ - - version_hdr = headers.get(Version.string, default_version) - - try: - version_service, version_str = version_hdr.split() - except ValueError: - raise exc.HTTPNotAcceptable(_( - "Invalid service type for %s header") % Version.string) - - if version_str.lower() == 'latest': - version_service, version_str = latest_version.split() - - if version_service != Version.service_string: - raise exc.HTTPNotAcceptable(_( - "Invalid service type for %s header") % Version.string) - try: - version = tuple(int(i) for i in version_str.split('.')) - except ValueError: - version = () - - if len(version) != 2: - raise exc.HTTPNotAcceptable(_( - "Invalid value for %s header") % Version.string) - return version - - def is_null(self): - return self.major == 0 and self.minor == 0 - - def matches(self, start_version, end_version): - if self.is_null(): - raise ValueError - - return start_version <= self <= end_version - - def __lt__(self, other): - if self.major < other.major: - return True - if self.major == other.major and self.minor < other.minor: - return True - return False - - def __gt__(self, other): - if self.major > other.major: - return True - if self.major == other.major and self.minor > other.minor: - return True - return False - - def __eq__(self, other): - return self.major == other.major and self.minor == other.minor - - def __le__(self, other): - return self < other or self == other - - def __ne__(self, other): - return not self.__eq__(other) - - def __ge__(self, other): - return self > other or self == other diff --git a/magnum/api/expose.py b/magnum/api/expose.py deleted file mode 100644 index f53a7dfb..00000000 --- a/magnum/api/expose.py +++ /dev/null @@ -1,20 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file 
except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import wsmeext.pecan as wsme_pecan - - -def expose(*args, **kwargs): - """Ensure that only JSON, and not XML, is supported.""" - if 'rest_content_types' not in kwargs: - kwargs['rest_content_types'] = ('json',) - return wsme_pecan.wsexpose(*args, **kwargs) diff --git a/magnum/api/hooks.py b/magnum/api/hooks.py deleted file mode 100644 index 8ad9c195..00000000 --- a/magnum/api/hooks.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from pecan import hooks - -from magnum.common import context -from magnum.conductor import api as conductor_api -import magnum.conf - -CONF = magnum.conf.CONF - - -class ContextHook(hooks.PecanHook): - """Configures a request context and attaches it to the request. - - The following HTTP request headers are used: - - X-User-Name: - Used for context.user_name. - - X-User-Id: - Used for context.user_id. - - X-Project-Name: - Used for context.project. - - X-Project-Id: - Used for context.project_id. - - X-Auth-Token: - Used for context.auth_token. - - X-Roles: - Used for context.roles. - """ - - def before(self, state): - headers = state.request.headers - user_name = headers.get('X-User-Name') - user_id = headers.get('X-User-Id') - project = headers.get('X-Project-Name') - project_id = headers.get('X-Project-Id') - domain_id = headers.get('X-User-Domain-Id') - domain_name = headers.get('X-User-Domain-Name') - auth_token = headers.get('X-Auth-Token') - roles = headers.get('X-Roles', '').split(',') - auth_token_info = state.request.environ.get('keystone.token_info') - - auth_url = CONF.keystone_authtoken.auth_uri - - state.request.context = context.make_context( - auth_token=auth_token, - auth_url=auth_url, - auth_token_info=auth_token_info, - user_name=user_name, - user_id=user_id, - project_name=project, - project_id=project_id, - domain_id=domain_id, - domain_name=domain_name, - roles=roles) - - -class RPCHook(hooks.PecanHook): - """Attach the rpcapi object to the request so controllers can get to it.""" - - def before(self, state): - state.request.rpcapi = conductor_api.API(context=state.request.context) - - -class NoExceptionTracebackHook(hooks.PecanHook): - """Workaround rpc.common: deserialize_remote_exception. - - deserialize_remote_exception builds rpc exception traceback into error - message which is then sent to the client. Such behavior is a security - concern so this hook is aimed to cut-off traceback from the error message. 
- """ - # NOTE(max_lobur): 'after' hook used instead of 'on_error' because - # 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator - # catches and handles all the errors, so 'on_error' dedicated for unhandled - # exceptions never fired. - def after(self, state): - # Omit empty body. Some errors may not have body at this level yet. - if not state.response.body: - return - - # Do nothing if there is no error. - if 200 <= state.response.status_int < 400: - return - - json_body = state.response.json - # Do not remove traceback when server in debug mode (except 'Server' - # errors when 'debuginfo' will be used for traces). - if CONF.debug and json_body.get('faultcode') != 'Server': - return - - faultsting = json_body.get('faultstring') - traceback_marker = 'Traceback (most recent call last):' - if faultsting and (traceback_marker in faultsting): - # Cut-off traceback. - faultsting = faultsting.split(traceback_marker, 1)[0] - # Remove trailing newlines and spaces if any. - json_body['faultstring'] = faultsting.rstrip() - # Replace the whole json. Cannot change original one because it's - # generated on the fly. - state.response.json = json_body diff --git a/magnum/api/http_error.py b/magnum/api/http_error.py deleted file mode 100644 index 04167b75..00000000 --- a/magnum/api/http_error.py +++ /dev/null @@ -1,70 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import six -from webob import exc - - -class HTTPNotAcceptableAPIVersion(exc.HTTPNotAcceptable): - # subclass of :class:`~HTTPNotAcceptable` - # - # This indicates the resource identified by the request is only - # capable of generating response entities which have content - # characteristics not acceptable according to the accept headers - # sent in the request. 
- # - # code: 406, title: Not Acceptable - # - # differences from webob.exc.HTTPNotAcceptable: - # - # - additional max and min version parameters - # - additional error info for code, title, and links - code = 406 - title = 'Not Acceptable' - max_version = '' - min_version = '' - - def __init__(self, detail=None, headers=None, comment=None, - body_template=None, max_version='', min_version='', **kw): - - super(HTTPNotAcceptableAPIVersion, self).__init__( - detail=detail, headers=headers, comment=comment, - body_template=body_template, **kw) - - self.max_version = max_version - self.min_version = min_version - - def __call__(self, environ, start_response): - for err_str in self.app_iter: - err = {} - try: - err = json.loads(err_str.decode('utf-8')) - except ValueError: - pass - - links = {'rel': 'help', 'href': 'http://developer.openstack.org' - '/api-guide/compute/microversions.html'} - - err['max_version'] = self.max_version - err['min_version'] = self.min_version - err['code'] = "magnum.microversion-unsupported" - err['links'] = [links] - err['title'] = "Requested microversion is unsupported" - - self.app_iter = [six.b(json.dumps(err))] - self.headers['Content-Length'] = str(len(self.app_iter[0])) - - return super(HTTPNotAcceptableAPIVersion, self).__call__( - environ, start_response) diff --git a/magnum/api/middleware/__init__.py b/magnum/api/middleware/__init__.py deleted file mode 100644 index ee159314..00000000 --- a/magnum/api/middleware/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.api.middleware import auth_token -from magnum.api.middleware import parsable_error - - -AuthTokenMiddleware = auth_token.AuthTokenMiddleware -ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware - -__all__ = (AuthTokenMiddleware, - ParsableErrorMiddleware) diff --git a/magnum/api/middleware/auth_token.py b/magnum/api/middleware/auth_token.py deleted file mode 100644 index 788eb0b1..00000000 --- a/magnum/api/middleware/auth_token.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -from keystonemiddleware import auth_token -from oslo_log import log - -from magnum.common import exception -from magnum.common import utils -from magnum.i18n import _ - -LOG = log.getLogger(__name__) - - -class AuthTokenMiddleware(auth_token.AuthProtocol): - """A wrapper on Keystone auth_token middleware. - - Does not perform verification of authentication tokens - for public routes in the API. 
- - """ - def __init__(self, app, conf, public_api_routes=None): - if public_api_routes is None: - public_api_routes = [] - route_pattern_tpl = '%s(\.json)?$' - - try: - self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl) - for route_tpl in public_api_routes] - except re.error as e: - msg = _('Cannot compile public API routes: %s') % e - - LOG.error(msg) - raise exception.ConfigInvalid(error_msg=msg) - - super(AuthTokenMiddleware, self).__init__(app, conf) - - def __call__(self, env, start_response): - path = utils.safe_rstrip(env.get('PATH_INFO'), '/') - - # The information whether the API call is being performed against the - # public API is required for some other components. Saving it to the - # WSGI environment is reasonable thereby. - env['is_public_api'] = any(map(lambda pattern: re.match(pattern, path), - self.public_api_routes)) - - if env['is_public_api']: - return self._app(env, start_response) - - return super(AuthTokenMiddleware, self).__call__(env, start_response) - - @classmethod - def factory(cls, global_config, **local_conf): - public_routes = local_conf.get('acl_public_routes', '') - public_api_routes = [path.strip() for path in public_routes.split(',')] - - def _factory(app): - return cls(app, global_config, public_api_routes=public_api_routes) - return _factory diff --git a/magnum/api/middleware/parsable_error.py b/magnum/api/middleware/parsable_error.py deleted file mode 100644 index e3cff047..00000000 --- a/magnum/api/middleware/parsable_error.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright ? 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Middleware to replace the plain text message body of an error -response with one formatted so the client can parse it. - -Based on pecan.middleware.errordocument -""" - -import json -import six - -from magnum.i18n import _ - - -class ParsableErrorMiddleware(object): - """Replace error body with something the client can parse.""" - def __init__(self, app): - self.app = app - - def _update_errors(self, app_iter, status_code): - errs = [] - for err_str in app_iter: - err = {} - try: - err = json.loads(err_str.decode('utf-8')) - except ValueError: - pass - - if 'title' in err and 'description' in err: - title = err['title'] - desc = err['description'] - elif 'faultstring' in err: - title = err['faultstring'].split('.', 1)[0] - desc = err['faultstring'] - else: - title = '' - desc = '' - - code = err['faultcode'].lower() if 'faultcode' in err else '' - - # if already formatted by custom exception, don't update - if 'min_version' in err: - errs.append(err) - else: - errs.append({ - 'request_id': '', - 'code': code, - 'status': status_code, - 'title': title, - 'detail': desc, - 'links': []}) - - return errs - - def __call__(self, environ, start_response): - # Request for this state, modified by replace_start_response() - # and used when an error is being reported. 
- state = {} - - def replacement_start_response(status, headers, exc_info=None): - """Overrides the default response to make errors parsable.""" - try: - status_code = int(status.split(' ')[0]) - state['status_code'] = status_code - except (ValueError, TypeError): # pragma: nocover - raise Exception(_( - 'ErrorDocumentMiddleware received an invalid ' - 'status %s') % status) - else: - if (state['status_code'] // 100) not in (2, 3): - # Remove some headers so we can replace them later - # when we have the full error message and can - # compute the length. - headers = [(h, v) - for (h, v) in headers - if h not in ('Content-Length', 'Content-Type') - ] - # Save the headers in case we need to modify them. - state['headers'] = headers - - return start_response(status, headers, exc_info) - - app_iter = self.app(environ, replacement_start_response) - - if (state['status_code'] // 100) not in (2, 3): - errs = self._update_errors(app_iter, state['status_code']) - body = [six.b(json.dumps({'errors': errs}))] - state['headers'].append(('Content-Type', 'application/json')) - state['headers'].append(('Content-Length', str(len(body[0])))) - - else: - body = app_iter - return body diff --git a/magnum/api/rest_api_version_history.rst b/magnum/api/rest_api_version_history.rst deleted file mode 100644 index a9aa04b8..00000000 --- a/magnum/api/rest_api_version_history.rst +++ /dev/null @@ -1,77 +0,0 @@ -REST API Version History -======================== - -This documents the changes made to the REST API with every -microversion change. The description for each version should be a -verbose one which has enough information to be suitable for use in -user documentation. - -1.1 ---- - - This is the initial version of the v1.1 API which supports - microversions. The v1.1 API is, from the REST API user's point of - view, exactly the same as v1.0 except with strong input validation. - - A user can specify a header in the API request:: - - OpenStack-API-Version: container-infra <version> - - where ``<version>`` is any valid api version for this API. - - If no version is specified then the API will behave as if a version - request of v1.1 was requested. - -1.2 ---- - - Support for async cluster (previously known as bay) operations - - Before v1.2 all magnum bay operations were synchronous, and as a result API - requests were blocked until a response from the Heat service was received. - With this change cluster-create/bay-create, cluster-update/bay-update and - cluster-delete/bay-delete calls are asynchronous. - - -1.3 ---- - - Rollback cluster (previously known as bay) on update failure - - A user can enable rollback on bay update failure by specifying microversion - 1.3 in the header ({'OpenStack-API-Version': 'container-infra 1.3'}) and - passing 'rollback=True' when issuing the cluster/bay update request. - For example:: - - http://XXX/v1/clusters/XXX/?rollback=True or - - http://XXX/v1/bays/XXX/?rollback=True - - -1.4 ---- - - Add stats API - - An admin user can get the total number of clusters and nodes for a - specified tenant or for all tenants; a non-admin user can get self stats. - For example:: - - http://XXX/v1/stats or - - http://XXX/v1/stats?project_id=<project-id> or - - http://XXX/v1/stats?project_id=<project-id>&type=<type> - - -1.5 ---- - - Support for cluster CA certificate rotation - - This gives admins a way to revoke access to an existing cluster once - a user has been granted access. - - -1.6 ---- - - Add quotas API - - An admin user can set/update/delete/list quotas for the given tenant. - A non-admin user can get self quota information.
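The header negotiation described in versions.py and in the history above boils down to a small parser. Here is a runnable sketch, not taken from the tree, that mirrors Version.parse_headers(); the "latest" constant assumes 1.6, the maximum at this commit::

    def parse_version_header(value, latest="container-infra 1.6"):
        # Header format is "<service> <major>.<minor>", where "latest"
        # resolves to the newest version the server supports.
        service, _, ver = value.partition(" ")
        if ver.lower() == "latest":
            service, _, ver = latest.partition(" ")
        if service != "container-infra":
            raise ValueError("Invalid service type for the version header")
        major, minor = (int(p) for p in ver.split("."))
        return major, minor

    print(parse_version_header("container-infra 1.4"))     # (1, 4)
    print(parse_version_header("container-infra latest"))  # (1, 6)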
diff --git a/magnum/api/servicegroup.py b/magnum/api/servicegroup.py deleted file mode 100644 index f65ef6a3..00000000 --- a/magnum/api/servicegroup.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_utils import timeutils - -import magnum.conf -from magnum.objects import magnum_service - -CONF = magnum.conf.CONF - - -class ServiceGroup(object): - def __init__(self): - self.service_down_time = CONF.service_down_time - - def service_is_up(self, member): - if not isinstance(member, magnum_service.MagnumService): - raise TypeError - if member.forced_down: - return False - - last_heartbeat = (member.last_seen_up or - member.updated_at or member.created_at) - now = timeutils.utcnow(True) - elapsed = timeutils.delta_seconds(last_heartbeat, now) - is_up = abs(elapsed) <= self.service_down_time - return is_up diff --git a/magnum/api/utils.py b/magnum/api/utils.py deleted file mode 100644 index 1bc2a3ec..00000000 --- a/magnum/api/utils.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import jsonpatch -from oslo_utils import uuidutils -import pecan -import wsme - -from magnum.common import exception -from magnum.common import utils -import magnum.conf -from magnum.i18n import _ -from magnum import objects - -CONF = magnum.conf.CONF - - -JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException, - jsonpatch.JsonPointerException, - KeyError) - - -DOCKER_MINIMUM_MEMORY = 4 * 1024 * 1024 - - -def validate_limit(limit): - if limit is not None and limit <= 0: - raise wsme.exc.ClientSideError(_("Limit must be positive")) - - if limit is not None: - return min(CONF.api.max_limit, limit) - else: - return CONF.api.max_limit - - -def validate_sort_dir(sort_dir): - if sort_dir not in ['asc', 'desc']: - raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. " - "Acceptable values are " - "'asc' or 'desc'") % sort_dir) - return sort_dir - - -def validate_docker_memory(mem_str): - """Docker requires that the minimum memory limit be >= 4M.""" - try: - mem = utils.get_docker_quantity(mem_str) - except exception.UnsupportedDockerQuantityFormat: - raise wsme.exc.ClientSideError(_("Invalid docker memory specified. " - "Acceptable values are of the format: " - "<number>[<unit>], " - "where unit = b, k, m or g")) - if mem < DOCKER_MINIMUM_MEMORY: - raise wsme.exc.ClientSideError(_("Docker minimum memory limit " - "allowed is %d B.") - % DOCKER_MINIMUM_MEMORY) - - -def apply_jsonpatch(doc, patch): - for p in patch: - if p['op'] == 'add' and p['path'].count('/') == 1: - attr = p['path'].lstrip('/') - if attr not in doc: - msg = _("Adding a new attribute %s to the root of " - "the resource is not allowed.") % p['path'] - raise wsme.exc.ClientSideError(msg) - if doc[attr] is not None: - msg = _("The attribute %s already exists, please use " - "the 'replace' operation instead.") % p['path'] - raise wsme.exc.ClientSideError(msg) - return jsonpatch.apply_patch(doc, patch) - - -def get_resource(resource, resource_ident): - """Get the resource from the uuid or logical name. - - :param resource: the resource type. - :param resource_ident: the UUID or logical name of the resource. - - :returns: The resource. - """ - resource = getattr(objects, resource) - - if uuidutils.is_uuid_like(resource_ident): - return resource.get_by_uuid(pecan.request.context, resource_ident) - - return resource.get_by_name(pecan.request.context, resource_ident) - - -def get_openstack_resource(manager, resource_ident, resource_type): - """Get the openstack resource from the uuid or logical name. - - :param manager: the resource manager class. - :param resource_ident: the UUID or logical name of the resource. - :param resource_type: the type of the resource - - :returns: The openstack resource. - :raises: ResourceNotFound if the openstack resource does not exist. - Conflict if multiple openstack resources have the same name. - """ - if uuidutils.is_uuid_like(resource_ident): - resource_data = manager.get(resource_ident) - else: - filters = {'name': resource_ident} - matches = list(manager.list(filters=filters)) - if len(matches) == 0: - raise exception.ResourceNotFound(name=resource_type, - id=resource_ident) - if len(matches) > 1: - msg = ("Multiple %(resource_type)s exist with the same name " - "%(resource_ident)s. Please use the resource id " - "instead." % {'resource_type': resource_type, - 'resource_ident': resource_ident}) - raise exception.Conflict(msg) - resource_data = matches[0] - return resource_data diff --git a/magnum/api/validation.py b/magnum/api/validation.py deleted file mode 100644 index ff852df3..00000000 --- a/magnum/api/validation.py +++ /dev/null @@ -1,309 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License.
- -import decorator - -import pecan - -from keystoneauth1 import exceptions as ka_exception - -from magnum.api import utils as api_utils -from magnum.common import clients -from magnum.common import exception -import magnum.conf -from magnum.drivers.common import driver -from magnum.i18n import _ -from magnum import objects - -CONF = magnum.conf.CONF - -cluster_update_allowed_properties = set(['node_count']) - - -def enforce_cluster_type_supported(): - @decorator.decorator - def wrapper(func, *args, **kwargs): - cluster = args[1] - cluster_template = objects.ClusterTemplate.get_by_uuid( - pecan.request.context, cluster.cluster_template_id) - cluster_type = (cluster_template.server_type, - cluster_template.cluster_distro, - cluster_template.coe) - driver.Driver.get_driver(*cluster_type) - return func(*args, **kwargs) - - return wrapper - - -def enforce_cluster_volume_storage_size(): - @decorator.decorator - def wrapper(func, *args, **kwargs): - cluster = args[1] - cluster_template = objects.ClusterTemplate.get_by_uuid( - pecan.request.context, cluster.cluster_template_id) - _enforce_volume_storage_size( - cluster_template.as_dict(), cluster.as_dict()) - return func(*args, **kwargs) - - return wrapper - - -def enforce_valid_project_id_on_create(): - @decorator.decorator - def wrapper(func, *args, **kwargs): - quota = args[1] - _validate_project_id(quota.project_id) - return func(*args, **kwargs) - - return wrapper - - -def _validate_project_id(project_id): - try: - context = pecan.request.context - osc = clients.OpenStackClients(context) - osc.keystone().domain_admin_client.projects.get(project_id) - except ka_exception.http.NotFound: - raise exception.ProjectNotFound(name='project_id', - id=project_id) - - -def enforce_network_driver_types_create(): - @decorator.decorator - def wrapper(func, *args, **kwargs): - cluster_template = args[1] - _enforce_network_driver_types(cluster_template) - return func(*args, **kwargs) - - return wrapper - - -def enforce_network_driver_types_update(): - @decorator.decorator - def wrapper(func, *args, **kwargs): - cluster_template_ident = args[1] - patch = args[2] - cluster_template = api_utils.get_resource('ClusterTemplate', - cluster_template_ident) - try: - cluster_template_dict = api_utils.apply_jsonpatch( - cluster_template.as_dict(), patch) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - cluster_template = objects.ClusterTemplate(pecan.request.context, - **cluster_template_dict) - _enforce_network_driver_types(cluster_template) - return func(*args, **kwargs) - - return wrapper - - -def _enforce_network_driver_types(cluster_template): - validator = Validator.get_coe_validator(cluster_template.coe) - if not cluster_template.network_driver: - cluster_template.network_driver = validator.default_network_driver - validator.validate_network_driver(cluster_template.network_driver) - - -def enforce_server_type(): - @decorator.decorator - def wrapper(func, *args, **kwargs): - cluster_template = args[1] - _enforce_server_type(cluster_template) - return func(*args, **kwargs) - - return wrapper - - -def _enforce_server_type(cluster_template): - validator = Validator.get_coe_validator(cluster_template.coe) - validator.validate_server_type(cluster_template.server_type) - - -def enforce_volume_driver_types_create(): - @decorator.decorator - def wrapper(func, *args, **kwargs): - cluster_template = args[1] - _enforce_volume_driver_types(cluster_template.as_dict()) - return func(*args, **kwargs) - - return wrapper - - 
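Every validator in this module follows the same shape, so one hypothetical example (the check and its names are invented, not Magnum API) shows the pattern: the decorator library passes the wrapped function first, and args[1] is the wsme-deserialized body because args[0] is the controller itself::

    import decorator

    def enforce_positive_node_count():  # hypothetical check, same shape
        @decorator.decorator
        def wrapper(func, *args, **kwargs):
            cluster = args[1]  # args[0] is the controller (self)
            if getattr(cluster, 'node_count', 1) < 1:
                raise ValueError('node_count must be >= 1')
            return func(*args, **kwargs)
        return wrapper

    class FakeController(object):
        @enforce_positive_node_count()
        def post(self, cluster):
            return 'created'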
-def enforce_volume_storage_size_create(): - @decorator.decorator - def wrapper(func, *args, **kwargs): - cluster_template = args[1] - _enforce_volume_storage_size(cluster_template.as_dict(), {}) - return func(*args, **kwargs) - - return wrapper - - -def enforce_volume_driver_types_update(): - @decorator.decorator - def wrapper(func, *args, **kwargs): - cluster_template_ident = args[1] - patch = args[2] - cluster_template = api_utils.get_resource('ClusterTemplate', - cluster_template_ident) - try: - cluster_template_dict = api_utils.apply_jsonpatch( - cluster_template.as_dict(), patch) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - _enforce_volume_driver_types(cluster_template_dict) - return func(*args, **kwargs) - - return wrapper - - -def _enforce_volume_driver_types(cluster_template): - validator = Validator.get_coe_validator(cluster_template['coe']) - if not cluster_template.get('volume_driver'): - return - validator.validate_volume_driver(cluster_template['volume_driver']) - - -def _enforce_volume_storage_size(cluster_template, cluster): - volume_size = cluster.get('docker_volume_size') \ - or cluster_template.get('docker_volume_size') - if not volume_size: - return - storage_driver = cluster_template.get('docker_storage_driver') - if storage_driver == 'devicemapper': - if volume_size < 3: - raise exception.InvalidParameterValue( - 'docker volume size %s GB is not valid, ' - 'expecting minimum value 3GB for %s storage ' - 'driver.' % (volume_size, storage_driver)) - - -def validate_cluster_properties(delta): - - update_disallowed_properties = delta - cluster_update_allowed_properties - if update_disallowed_properties: - err = (_("cannot change cluster property(ies) %s.") % - ", ".join(update_disallowed_properties)) - raise exception.InvalidParameterValue(err=err) - - -class Validator(object): - - @classmethod - def get_coe_validator(cls, coe): - if coe == 'kubernetes': - return K8sValidator() - elif coe == 'swarm' or coe == 'swarm-mode': - return SwarmValidator() - elif coe == 'mesos': - return MesosValidator() - else: - raise exception.InvalidParameterValue( - _('Requested COE type %s is not supported.') % coe) - - @classmethod - def validate_network_driver(cls, driver): - cls._validate_network_driver_supported(driver) - cls._validate_network_driver_allowed(driver) - - @classmethod - def _validate_network_driver_supported(cls, driver): - """Confirm that driver is supported by Magnum for this COE.""" - if driver not in cls.supported_network_drivers: - raise exception.InvalidParameterValue(_( - 'Network driver type %(driver)s is not supported, ' - 'expecting a %(supported_drivers)s network driver.') % { - 'driver': driver, - 'supported_drivers': '/'.join( - cls.supported_network_drivers + ['unspecified'])}) - - @classmethod - def _validate_network_driver_allowed(cls, driver): - """Confirm that driver is allowed via configuration for this COE.""" - if ('all' not in cls.allowed_network_drivers and - driver not in cls.allowed_network_drivers): - raise exception.InvalidParameterValue(_( - 'Network driver type %(driver)s is not allowed, ' - 'expecting a %(allowed_drivers)s network driver. 
') % { - 'driver': driver, - 'allowed_drivers': '/'.join( - cls.allowed_network_drivers + ['unspecified'])}) - - @classmethod - def validate_volume_driver(cls, driver): - cls._validate_volume_driver_supported(driver) - - @classmethod - def _validate_volume_driver_supported(cls, driver): - """Confirm that volume driver is supported by Magnum for this COE.""" - if driver not in cls.supported_volume_driver: - raise exception.InvalidParameterValue(_( - 'Volume driver type %(driver)s is not supported, ' - 'expecting a %(supported_volume_driver)s volume driver.') % { - 'driver': driver, - 'supported_volume_driver': '/'.join( - cls.supported_volume_driver + ['unspecified'])}) - - @classmethod - def validate_server_type(cls, server_type): - cls._validate_server_type(server_type) - - @classmethod - def _validate_server_type(cls, server_type): - """Confirm that server type is supported by Magnum for this COE.""" - if server_type not in cls.supported_server_types: - raise exception.InvalidParameterValue(_( - 'Server type %(server_type)s is not supported, ' - 'expecting a %(supported_server_types)s server type.') % { - 'server_type': server_type, - 'supported_server_types': '/'.join( - cls.supported_server_types + ['unspecified'])}) - - -class K8sValidator(Validator): - - supported_network_drivers = ['flannel'] - supported_server_types = ['vm', 'bm'] - allowed_network_drivers = ( - CONF.cluster_template.kubernetes_allowed_network_drivers) - default_network_driver = ( - CONF.cluster_template.kubernetes_default_network_driver) - - supported_volume_driver = ['cinder'] - - -class SwarmValidator(Validator): - - supported_network_drivers = ['docker', 'flannel'] - supported_server_types = ['vm', 'bm'] - allowed_network_drivers = (CONF.cluster_template. - swarm_allowed_network_drivers) - default_network_driver = (CONF.cluster_template. - swarm_default_network_driver) - - supported_volume_driver = ['rexray'] - - -class MesosValidator(Validator): - - supported_network_drivers = ['docker'] - supported_server_types = ['vm', 'bm'] - allowed_network_drivers = (CONF.cluster_template. - mesos_allowed_network_drivers) - default_network_driver = (CONF.cluster_template. - mesos_default_network_driver) - - supported_volume_driver = ['rexray'] diff --git a/magnum/api/versioned_method.py b/magnum/api/versioned_method.py deleted file mode 100644 index 2acf65af..00000000 --- a/magnum/api/versioned_method.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
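A condensed, runnable mirror of the lookup-and-check flow above; the SUPPORTED table is a simplification of the three Validator subclasses and deliberately ignores the allowed_network_drivers configuration::

    SUPPORTED = {'kubernetes': ['flannel'],
                 'swarm': ['docker', 'flannel'],
                 'mesos': ['docker']}

    def validate_network_driver(coe, driver):
        if coe not in SUPPORTED:
            raise ValueError('Requested COE type %s is not supported.' % coe)
        if driver not in SUPPORTED[coe]:
            raise ValueError('Network driver type %s is not supported, '
                             'expecting a %s network driver.'
                             % (driver, '/'.join(SUPPORTED[coe])))

    validate_network_driver('kubernetes', 'flannel')  # passes silently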
- - -class VersionedMethod(object): - - def __init__(self, name, start_version, end_version, func): - """Versioning information for a single method - - @name: Name of the method - @start_version: Minimum acceptable version - @end_version: Maximum acceptable version - @func: Method to call - - Minimum and maximum are inclusive - """ - self.name = name - self.start_version = start_version - self.end_version = end_version - self.func = func - - def __str__(self): - return ("Version Method %s: min: %s, max: %s" - % (self.name, self.start_version, self.end_version)) diff --git a/magnum/cmd/__init__.py b/magnum/cmd/__init__.py deleted file mode 100644 index 277b2af3..00000000 --- a/magnum/cmd/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2017 Fujitsu Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE(hieulq): we monkey patch all eventlet services for easier tracking/debug - -import eventlet - -eventlet.monkey_patch() diff --git a/magnum/cmd/api.py b/magnum/cmd/api.py deleted file mode 100755 index 9c228a27..00000000 --- a/magnum/cmd/api.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2013 - Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
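VersionedMethod above only stores data; a dispatcher (hypothetical, not part of this tree) typically scans the registered records for the one whose inclusive range matches the request version::

    def dispatch(methods, request_version):
        # 'methods' holds VersionedMethod-like records; versions compare as
        # (major, minor) tuples, matching Version's comparison operators.
        for m in methods:
            if m.start_version <= request_version <= m.end_version:
                return m.func
        raise LookupError('no method for version %s.%s' % request_version)

    class M(object):  # stand-in for VersionedMethod
        def __init__(self, s, e, f):
            self.start_version, self.end_version, self.func = s, e, f

    methods = [M((1, 1), (1, 3), lambda: 'old'),
               M((1, 4), (1, 6), lambda: 'new')]
    print(dispatch(methods, (1, 5))())  # -> 'new'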
- -"""Starter script for the Magnum API service.""" - -import os -import sys - -from oslo_concurrency import processutils -from oslo_log import log as logging -from oslo_reports import guru_meditation_report as gmr -from werkzeug import serving - -from magnum.api import app as api_app -from magnum.common import profiler -from magnum.common import service -import magnum.conf -from magnum.i18n import _ -from magnum.objects import base -from magnum import version - - -CONF = magnum.conf.CONF -LOG = logging.getLogger(__name__) - - -def _get_ssl_configs(use_ssl): - if use_ssl: - cert_file = CONF.api.ssl_cert_file - key_file = CONF.api.ssl_key_file - - if cert_file and not os.path.exists(cert_file): - raise RuntimeError( - _("Unable to find cert_file : %s") % cert_file) - - if key_file and not os.path.exists(key_file): - raise RuntimeError( - _("Unable to find key_file : %s") % key_file) - - return cert_file, key_file - else: - return None - - -def main(): - service.prepare_service(sys.argv) - - gmr.TextGuruMeditation.setup_autorun(version) - - # Enable object backporting via the conductor - base.MagnumObject.indirection_api = base.MagnumObjectIndirectionAPI() - - app = api_app.load_app() - - # Setup OSprofiler for WSGI service - profiler.setup('magnum-api', CONF.host) - - # SSL configuration - use_ssl = CONF.api.enabled_ssl - - # Create the WSGI server and start it - host, port = CONF.api.host, CONF.api.port - - LOG.info('Starting server in PID %s', os.getpid()) - LOG.debug("Configuration:") - CONF.log_opt_values(LOG, logging.DEBUG) - - LOG.info('Serving on %(proto)s://%(host)s:%(port)s', - dict(proto="https" if use_ssl else "http", host=host, port=port)) - - workers = CONF.api.workers - if not workers: - workers = processutils.get_worker_count() - LOG.info('Server will handle each request in a new process up to' - ' %s concurrent processes', workers) - serving.run_simple(host, port, app, processes=workers, - ssl_context=_get_ssl_configs(use_ssl)) diff --git a/magnum/cmd/conductor.py b/magnum/cmd/conductor.py deleted file mode 100755 index f9e94fdc..00000000 --- a/magnum/cmd/conductor.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2014 - Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Starter script for the Magnum conductor service.""" - -import os -import sys - -from oslo_concurrency import processutils -from oslo_log import log as logging -from oslo_reports import guru_meditation_report as gmr -from oslo_service import service - -from magnum.common import rpc_service -from magnum.common import service as magnum_service -from magnum.common import short_id -from magnum.conductor.handlers import ca_conductor -from magnum.conductor.handlers import cluster_conductor -from magnum.conductor.handlers import conductor_listener -from magnum.conductor.handlers import indirection_api -import magnum.conf -from magnum import version - -CONF = magnum.conf.CONF -LOG = logging.getLogger(__name__) - - -def main(): - magnum_service.prepare_service(sys.argv) - - gmr.TextGuruMeditation.setup_autorun(version) - - LOG.info('Starting server in PID %s', os.getpid()) - LOG.debug("Configuration:") - CONF.log_opt_values(LOG, logging.DEBUG) - - conductor_id = short_id.generate_id() - endpoints = [ - indirection_api.Handler(), - cluster_conductor.Handler(), - conductor_listener.Handler(), - ca_conductor.Handler(), - ] - - server = rpc_service.Service.create(CONF.conductor.topic, - conductor_id, endpoints, - binary='magnum-conductor') - workers = CONF.conductor.workers - if not workers: - workers = processutils.get_worker_count() - launcher = service.launch(CONF, server, workers=workers) - launcher.wait() diff --git a/magnum/cmd/db_manage.py b/magnum/cmd/db_manage.py deleted file mode 100644 index 44a16e5c..00000000 --- a/magnum/cmd/db_manage.py +++ /dev/null @@ -1,69 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Starter script for magnum-db-manage.""" - -from oslo_config import cfg - -from magnum.db import migration - - -CONF = cfg.CONF - - -def do_version(): - print('Current DB revision is %s' % migration.version()) - - -def do_upgrade(): - migration.upgrade(CONF.command.revision) - - -def do_stamp(): - migration.stamp(CONF.command.revision) - - -def do_revision(): - migration.revision(message=CONF.command.message, - autogenerate=CONF.command.autogenerate) - - -def add_command_parsers(subparsers): - parser = subparsers.add_parser('version') - parser.set_defaults(func=do_version) - - parser = subparsers.add_parser('upgrade') - parser.add_argument('revision', nargs='?') - parser.set_defaults(func=do_upgrade) - - parser = subparsers.add_parser('stamp') - parser.add_argument('revision') - parser.set_defaults(func=do_stamp) - - parser = subparsers.add_parser('revision') - parser.add_argument('-m', '--message') - parser.add_argument('--autogenerate', action='store_true') - parser.set_defaults(func=do_revision) - - -command_opt = cfg.SubCommandOpt('command', - title='Command', - help='Available commands', - handler=add_command_parsers) - - -def main(): - CONF.register_cli_opt(command_opt) - - CONF(project='magnum') - CONF.command.func() diff --git a/magnum/cmd/driver_manage.py b/magnum/cmd/driver_manage.py deleted file mode 100644 index 40c7acbe..00000000 --- a/magnum/cmd/driver_manage.py +++ /dev/null @@ -1,102 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Starter script for magnum-driver-manage.""" -import sys - -from cliff import app -from cliff import commandmanager -from cliff import lister - -import magnum.conf -from magnum.drivers.common import driver -from magnum import version - -CONF = magnum.conf.CONF - - -class DriverList(lister.Lister): - """List templates""" - - def _print_rows(self, parsed_args, rows): - fields = ['name'] - field_labels = ['Name'] - - if parsed_args.details: - fields.extend(['server_type', 'os', 'coe']) - field_labels.extend(['Server_Type', 'OS', 'COE']) - if parsed_args.paths: - fields.append('path') - field_labels.append('Template Path') - return field_labels, [tuple([row[field] for field in fields]) - for row in rows] - - def get_parser(self, prog_name): - parser = super(DriverList, self).get_parser(prog_name) - parser.add_argument('-d', '--details', - action='store_true', - dest='details', - help=('display the cluster types provided by ' - 'each template')) - parser.add_argument('-p', '--paths', - action='store_true', - dest='paths', - help='display the path to each template file') - - return parser - - def take_action(self, parsed_args): - rows = [] - - for entry_point, cls in driver.Driver.load_entry_points(): - name = entry_point.name - definition = cls().get_template_definition() - template = dict(name=name, path=definition.template_path) - - if parsed_args.details: - for cluster_type in cls().provides: - row = dict() - row.update(template) - row.update(cluster_type) - rows.append(row) - else: - rows.append(template) - return self._print_rows(parsed_args, rows) - - -class DriverCommandManager(commandmanager.CommandManager): - COMMANDS = { - "list-drivers": DriverList, - } - - def load_commands(self, namespace): - for name, command_class in self.COMMANDS.items(): - self.add_command(name, command_class) - - -class DriverManager(app.App): - def __init__(self): - super(DriverManager, self).__init__( - description='Magnum Driver Manager', - version=version.version_info, - command_manager=DriverCommandManager(None), - deferred_help=True) - - -def main(args=None): - if args is None: - args = sys.argv[1:] - CONF([], - project='magnum', - version=version.version_info.release_string()) - return DriverManager().run(args) diff --git a/magnum/common/__init__.py b/magnum/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/common/cert_manager/__init__.py b/magnum/common/cert_manager/__init__.py deleted file mode 100644 index ab2e9baa..00000000 --- a/magnum/common/cert_manager/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015 Rackspace US, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from stevedore import driver - -import magnum.conf - -CONF = magnum.conf.CONF - -_CERT_MANAGER_PLUGIN = None - - -def get_backend(): - global _CERT_MANAGER_PLUGIN - if not _CERT_MANAGER_PLUGIN: - _CERT_MANAGER_PLUGIN = driver.DriverManager( - "magnum.cert_manager.backend", - CONF.certificates.cert_manager_type).driver - return _CERT_MANAGER_PLUGIN diff --git a/magnum/common/cert_manager/barbican_cert_manager.py b/magnum/common/cert_manager/barbican_cert_manager.py deleted file mode 100644 index cdb99934..00000000 --- a/magnum/common/cert_manager/barbican_cert_manager.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright 2014, 2015 Rackspace US, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from barbicanclient import client as barbican_client -from barbicanclient import exceptions as barbican_exc -from oslo_log import log as logging -from oslo_utils import excutils - -from magnum.common.cert_manager import cert_manager -from magnum.common import clients -from magnum.common import context -from magnum.common import exception as magnum_exc -from magnum.i18n import _ - -LOG = logging.getLogger(__name__) - - -class Cert(cert_manager.Cert): - """Representation of a Cert based on the Barbican CertificateContainer.""" - def __init__(self, cert_container): - if not isinstance(cert_container, - barbican_client.containers.CertificateContainer): - raise TypeError(_( - "Retrieved Barbican Container is not of the correct type " - "(certificate).")) - self._cert_container = cert_container - - # Container secrets are accessed upon query and can return as None, - # don't return the payload if the secret is not available. - - def get_certificate(self): - if self._cert_container.certificate: - return self._cert_container.certificate.payload - - def get_intermediates(self): - if self._cert_container.intermediates: - return self._cert_container.intermediates.payload - - def get_private_key(self): - if self._cert_container.private_key: - return self._cert_container.private_key.payload - - def get_private_key_passphrase(self): - if self._cert_container.private_key_passphrase: - return self._cert_container.private_key_passphrase.payload - - -_ADMIN_OSC = None - - -def get_admin_clients(): - global _ADMIN_OSC - if not _ADMIN_OSC: - _ADMIN_OSC = clients.OpenStackClients( - context.RequestContext(is_admin=True)) - return _ADMIN_OSC - - -class CertManager(cert_manager.CertManager): - """Certificate Manager that wraps the Barbican client API.""" - @staticmethod - def store_cert(certificate, private_key, intermediates=None, - private_key_passphrase=None, expiration=None, - name='Magnum TLS Cert', **kwargs): - """Stores a certificate in the certificate manager. 
- - :param certificate: PEM encoded TLS certificate - :param private_key: private key for the supplied certificate - :param intermediates: ordered and concatenated intermediate certs - :param private_key_passphrase: optional passphrase for the supplied key - :param expiration: the expiration time of the cert in ISO 8601 format - :param name: a friendly name for the cert - - :returns: the container_ref of the stored cert - :raises Exception: if certificate storage fails - """ - connection = get_admin_clients().barbican() - - LOG.info("Storing certificate container '{0}' in Barbican." - .format(name)) - - certificate_secret = None - private_key_secret = None - intermediates_secret = None - pkp_secret = None - - try: - certificate_secret = connection.secrets.create( - payload=certificate, - expiration=expiration, - name="Certificate" - ) - private_key_secret = connection.secrets.create( - payload=private_key, - expiration=expiration, - name="Private Key" - ) - certificate_container = connection.containers.create_certificate( - name=name, - certificate=certificate_secret, - private_key=private_key_secret - ) - if intermediates: - intermediates_secret = connection.secrets.create( - payload=intermediates, - expiration=expiration, - name="Intermediates" - ) - certificate_container.intermediates = intermediates_secret - if private_key_passphrase: - pkp_secret = connection.secrets.create( - payload=private_key_passphrase, - expiration=expiration, - name="Private Key Passphrase" - ) - certificate_container.private_key_passphrase = pkp_secret - - certificate_container.store() - return certificate_container.container_ref - # Barbican (because of Keystone-middleware) sometimes masks - # exceptions strangely -- this will catch anything that it raises and - # reraise the original exception, while also providing useful - # feedback in the logs for debugging - except magnum_exc.CertificateStorageException: - for secret in [certificate_secret, private_key_secret, - intermediates_secret, pkp_secret]: - if secret and secret.secret_ref: - old_ref = secret.secret_ref - try: - secret.delete() - LOG.info("Deleted secret {0} ({1}) during rollback." - .format(secret.name, old_ref)) - except Exception: - LOG.warning( - "Failed to delete {0} ({1}) during rollback. " - "This is probably not a problem." - .format(secret.name, old_ref)) - with excutils.save_and_reraise_exception(): - LOG.exception("Error storing certificate data") - - @staticmethod - def get_cert(cert_ref, service_name='Magnum', resource_ref=None, - check_only=False, **kwargs): - """Retrieves the specified cert and registers as a consumer. - - :param cert_ref: the UUID of the cert to retrieve - :param service_name: Friendly name for the consuming service - :param resource_ref: Full HATEOAS reference to the consuming resource - :param check_only: Read Certificate data without registering - - :return: Magnum.certificates.common.Cert representation of the - certificate data - :raises Exception: if certificate retrieval fails - """ - connection = get_admin_clients().barbican() - - LOG.info( - "Loading certificate container {0} from Barbican." 
- .format(cert_ref)) - try: - if check_only: - cert_container = connection.containers.get( - container_ref=cert_ref - ) - else: - cert_container = connection.containers.register_consumer( - container_ref=cert_ref, - name=service_name, - url=resource_ref - ) - return Cert(cert_container) - except barbican_exc.HTTPClientError: - with excutils.save_and_reraise_exception(): - LOG.exception("Error getting {0}".format(cert_ref)) - - @staticmethod - def delete_cert(cert_ref, service_name='Magnum', resource_ref=None, - **kwargs): - """Deletes the specified cert. - - :param cert_ref: the UUID of the cert to delete - :raises Exception: if certificate deletion fails - """ - connection = get_admin_clients().barbican() - - LOG.info( - "Recursively deleting certificate container {0} from Barbican." - .format(cert_ref)) - try: - certificate_container = connection.containers.get(cert_ref) - certificate_container.certificate.delete() - if certificate_container.intermediates: - certificate_container.intermediates.delete() - if certificate_container.private_key_passphrase: - certificate_container.private_key_passphrase.delete() - certificate_container.private_key.delete() - certificate_container.delete() - except barbican_exc.HTTPClientError: - with excutils.save_and_reraise_exception(): - LOG.exception( - "Error recursively deleting certificate container {0}" - .format(cert_ref)) diff --git a/magnum/common/cert_manager/cert_manager.py b/magnum/common/cert_manager/cert_manager.py deleted file mode 100644 index 639d501e..00000000 --- a/magnum/common/cert_manager/cert_manager.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2014, 2015 Rackspace US, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Certificate manager API -""" -import abc - -import six - -from magnum.common.x509 import operations - - -@six.add_metaclass(abc.ABCMeta) -class Cert(object): - """Base class to represent all certificates.""" - - @abc.abstractmethod - def get_certificate(self): - """Returns the certificate.""" - pass - - @abc.abstractmethod - def get_intermediates(self): - """Returns the intermediate certificates.""" - pass - - @abc.abstractmethod - def get_private_key(self): - """Returns the private key for the certificate.""" - pass - - def get_decrypted_private_key(self): - """Returns the decrypted private key for the certificate.""" - return operations.decrypt_key(self.get_private_key(), - self.get_private_key_passphrase()) - - @abc.abstractmethod - def get_private_key_passphrase(self): - """Returns the passphrase for the private key.""" - pass - - -@six.add_metaclass(abc.ABCMeta) -class CertManager(object): - """Base Cert Manager Interface - - A Cert Manager is responsible for managing certificates for TLS. - """ - - @abc.abstractmethod - def store_cert(self, certificate, private_key, intermediates=None, - private_key_passphrase=None, expiration=None, - name='Magnum TLS Cert', **kwargs): - """Stores (i.e., registers) a cert with the cert manager. 
- - This method stores the specified cert and returns its UUID that - identifies it within the cert manager. - If storage of the certificate data fails, a CertificateStorageException - should be raised. - """ - pass - - @abc.abstractmethod - def get_cert(self, cert_uuid, check_only=False, **kwargs): - """Retrieves the specified cert. - - If check_only is True, don't perform any sort of registration. - If the specified cert does not exist, a CertificateStorageException - should be raised. - """ - pass - - @abc.abstractmethod - def delete_cert(self, cert_uuid, **kwargs): - """Deletes the specified cert. - - If the specified cert does not exist, a CertificateStorageException - should be raised. - """ - pass diff --git a/magnum/common/cert_manager/local_cert_manager.py b/magnum/common/cert_manager/local_cert_manager.py deleted file mode 100644 index cc884f75..00000000 --- a/magnum/common/cert_manager/local_cert_manager.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2014, 2015 Rackspace US, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -from os import path -import uuid - -from oslo_log import log as logging - -from magnum.common.cert_manager import cert_manager -from magnum.common import exception -import magnum.conf -from magnum.i18n import _ - -LOG = logging.getLogger(__name__) - -CONF = magnum.conf.CONF - - -class Cert(cert_manager.Cert): - """Representation of a Cert for local storage.""" - def __init__(self, certificate, private_key, intermediates=None, - private_key_passphrase=None): - self.certificate = certificate - self.intermediates = intermediates - self.private_key = private_key - self.private_key_passphrase = private_key_passphrase - - def get_certificate(self): - return self.certificate - - def get_intermediates(self): - return self.intermediates - - def get_private_key(self): - return self.private_key - - def get_private_key_passphrase(self): - return self.private_key_passphrase - - -class CertManager(cert_manager.CertManager): - """Cert Manager Interface that stores data locally. - - This Cert Manager should be used for testing purpose. - """ - - @staticmethod - def store_cert(certificate, private_key, intermediates=None, - private_key_passphrase=None, **kwargs): - """Stores (i.e., registers) a cert with the cert manager. - - This method stores the specified cert to the filesystem and returns - a UUID that can be used to retrieve it. - - :param certificate: PEM encoded TLS certificate - :param private_key: private key for the supplied certificate - :param intermediates: ordered and concatenated intermediate certs - :param private_key_passphrase: optional passphrase for the supplied key - - :returns: the UUID of the stored cert - :raises CertificateStorageException: if certificate storage fails - """ - cert_ref = str(uuid.uuid4()) - filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) - - LOG.warning( - "Storing certificate data on the local filesystem. " - "CertManager type 'local' should be used for testing purpose." 
- ) - try: - filename_certificate = "{0}.crt".format(filename_base) - with open(filename_certificate, 'w') as cert_file: - cert_file.write(certificate) - - filename_private_key = "{0}.key".format(filename_base) - with open(filename_private_key, 'w') as key_file: - key_file.write(private_key) - - if intermediates: - filename_intermediates = "{0}.int".format(filename_base) - with open(filename_intermediates, 'w') as int_file: - int_file.write(intermediates) - - if private_key_passphrase: - filename_pkp = "{0}.pass".format(filename_base) - with open(filename_pkp, 'w') as pass_file: - pass_file.write(private_key_passphrase) - except IOError as ioe: - LOG.error("Failed to store certificate.") - raise exception.CertificateStorageException(msg=str(ioe)) - - return cert_ref - - @staticmethod - def get_cert(cert_ref, **kwargs): - """Retrieves the specified cert. - - :param cert_ref: the UUID of the cert to retrieve - - :return: magnum.common.cert_manager.cert_manager.Cert - representation of the certificate data - :raises CertificateStorageException: if certificate retrieval fails - """ - LOG.warning( - "Loading certificate {0} from the local filesystem. " - "CertManager type 'local' should be used for testing purpose." - .format(cert_ref)) - - filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) - - filename_certificate = "{0}.crt".format(filename_base) - filename_private_key = "{0}.key".format(filename_base) - filename_intermediates = "{0}.int".format(filename_base) - filename_pkp = "{0}.pass".format(filename_base) - - cert_data = dict() - - try: - with open(filename_certificate, 'r') as cert_file: - cert_data['certificate'] = cert_file.read() - except IOError: - LOG.error( - "Failed to read certificate for {0}." - .format(cert_ref)) - raise exception.CertificateStorageException( - msg=_("Certificate could not be read.") - ) - try: - with open(filename_private_key, 'r') as key_file: - cert_data['private_key'] = key_file.read() - except IOError: - LOG.error( - "Failed to read private key for {0}." - .format(cert_ref)) - raise exception.CertificateStorageException( - msg=_("Private Key could not be read.") - ) - - try: - if path.isfile(filename_intermediates): - with open(filename_intermediates, 'r') as int_file: - cert_data['intermediates'] = int_file.read() - except IOError as ioe: - LOG.error("Failed to read certificate.") - raise exception.CertificateStorageException(msg=str(ioe)) - - try: - if path.isfile(filename_pkp): - with open(filename_pkp, 'r') as pass_file: - cert_data['private_key_passphrase'] = pass_file.read() - except IOError as ioe: - LOG.error("Failed to read certificate.") - raise exception.CertificateStorageException(msg=str(ioe)) - - return Cert(**cert_data) - - @staticmethod - def delete_cert(cert_ref, **kwargs): - """Deletes the specified cert. - - :param cert_ref: the UUID of the cert to delete - - :raises CertificateStorageException: if certificate deletion fails - """ - LOG.warning( - "Deleting certificate {0} from the local filesystem. " - "CertManager type 'local' should be used for testing purpose." 
- .format(cert_ref)) - - filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) - - filename_certificate = "{0}.crt".format(filename_base) - filename_private_key = "{0}.key".format(filename_base) - filename_intermediates = "{0}.int".format(filename_base) - filename_pkp = "{0}.pass".format(filename_base) - - try: - os.remove(filename_certificate) - os.remove(filename_private_key) - if path.isfile(filename_intermediates): - os.remove(filename_intermediates) - if path.isfile(filename_pkp): - os.remove(filename_pkp) - except IOError as ioe: - LOG.error( - "Failed to delete certificate {0}." - .format(cert_ref)) - raise exception.CertificateStorageException(msg=str(ioe)) diff --git a/magnum/common/cert_manager/x509keypair_cert_manager.py b/magnum/common/cert_manager/x509keypair_cert_manager.py deleted file mode 100644 index cce86229..00000000 --- a/magnum/common/cert_manager/x509keypair_cert_manager.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) 2016 Intel, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.common.cert_manager import cert_manager -from magnum import objects - - -class Cert(cert_manager.Cert): - """Representation of a Cert for Magnum DB storage.""" - def __init__(self, certificate, private_key, intermediates=None, - private_key_passphrase=None): - self.certificate = certificate - self.intermediates = intermediates - self.private_key = private_key - self.private_key_passphrase = private_key_passphrase - - def get_certificate(self): - return self.certificate - - def get_intermediates(self): - return self.intermediates - - def get_private_key(self): - return self.private_key - - def get_private_key_passphrase(self): - return self.private_key_passphrase - - -class CertManager(cert_manager.CertManager): - """Cert Manager Interface that stores data locally in Magnum db. - - """ - - @staticmethod - def store_cert(certificate, private_key, intermediates=None, - private_key_passphrase=None, context=None, **kwargs): - """Stores (i.e., registers) a cert with the cert manager. - - This method stores the specified cert to x509keypair model and returns - a UUID that can be used to retrieve it. - - :param certificate: PEM encoded TLS certificate - :param private_key: private key for the supplied certificate - :param intermediates: ordered and concatenated intermediate certs - :param private_key_passphrase: optional passphrase for the supplied key - - :returns: the UUID of the stored cert - """ - x509keypair = {'certificate': certificate, 'private_key': private_key, - 'private_key_passphrase': private_key_passphrase, - 'intermediates': intermediates, - 'project_id': context.project_id, - 'user_id': context.user_id} - x509keypair_obj = objects.X509KeyPair(context, **x509keypair) - x509keypair_obj.create() - return x509keypair_obj.uuid - - @staticmethod - def get_cert(cert_ref, context=None, **kwargs): - """Retrieves the specified cert. 
- - :param cert_ref: the UUID of the cert to retrieve - - :return: magnum.common.cert_manager.cert_manager.Cert - representation of the certificate data - """ - cert_data = dict() - x509keypair_obj = objects.X509KeyPair.get_by_uuid(context, cert_ref) - cert_data['certificate'] = x509keypair_obj.certificate - cert_data['private_key'] = x509keypair_obj.private_key - cert_data['private_key_passphrase'] = \ - x509keypair_obj.private_key_passphrase - cert_data['intermediates'] = x509keypair_obj.intermediates - return Cert(**cert_data) - - @staticmethod - def delete_cert(cert_ref, context=None, **kwargs): - """Deletes the specified cert. - - :param cert_ref: the UUID of the cert to delete - """ - x509keypair_obj = objects.X509KeyPair.get_by_uuid(context, cert_ref) - x509keypair_obj.destroy() diff --git a/magnum/common/clients.py b/magnum/common/clients.py deleted file mode 100644 index 86fa5996..00000000 --- a/magnum/common/clients.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2014 - Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from barbicanclient import client as barbicanclient -from glanceclient import client as glanceclient -from heatclient import client as heatclient -from keystoneauth1.exceptions import catalog -from neutronclient.v2_0 import client as neutronclient -from novaclient import client as novaclient -from oslo_log import log as logging - -from magnum.common import exception -from magnum.common import keystone -import magnum.conf - -CONF = magnum.conf.CONF -LOG = logging.getLogger(__name__) - - -class OpenStackClients(object): - """Convenience class to create and cache client instances.""" - - def __init__(self, context): - self.context = context - self._keystone = None - self._heat = None - self._glance = None - self._barbican = None - self._nova = None - self._neutron = None - - def url_for(self, **kwargs): - return self.keystone().session.get_endpoint(**kwargs) - - def magnum_url(self): - endpoint_type = self._get_client_option('magnum', 'endpoint_type') - region_name = self._get_client_option('magnum', 'region_name') - try: - return self.url_for(service_type='container-infra', - interface=endpoint_type, - region_name=region_name) - except catalog.EndpointNotFound: - url = self.url_for(service_type='container', - interface=endpoint_type, - region_name=region_name) - LOG.warning('Service type "container" is deprecated and will ' - 'be removed in a subsequent release') - return url - - def cinder_region_name(self): - cinder_region_name = self._get_client_option('cinder', 'region_name') - return self.keystone().get_validate_region_name(cinder_region_name) - - @property - def auth_url(self): - return self.keystone().auth_url - - @property - def auth_token(self): - return self.context.auth_token or self.keystone().auth_token - - def keystone(self): - if self._keystone: - return self._keystone - - self._keystone = keystone.KeystoneClientV3(self.context) - return self._keystone - - def _get_client_option(self, client, option): - return getattr(getattr(CONF, 
'%s_client' % client), option) - - @exception.wrap_keystone_exception - def heat(self): - if self._heat: - return self._heat - - endpoint_type = self._get_client_option('heat', 'endpoint_type') - region_name = self._get_client_option('heat', 'region_name') - heatclient_version = self._get_client_option('heat', 'api_version') - endpoint = self.url_for(service_type='orchestration', - interface=endpoint_type, - region_name=region_name) - - args = { - 'endpoint': endpoint, - 'auth_url': self.auth_url, - 'token': self.auth_token, - 'username': None, - 'password': None, - 'ca_file': self._get_client_option('heat', 'ca_file'), - 'cert_file': self._get_client_option('heat', 'cert_file'), - 'key_file': self._get_client_option('heat', 'key_file'), - 'insecure': self._get_client_option('heat', 'insecure') - } - self._heat = heatclient.Client(heatclient_version, **args) - - return self._heat - - @exception.wrap_keystone_exception - def glance(self): - if self._glance: - return self._glance - - endpoint_type = self._get_client_option('glance', 'endpoint_type') - region_name = self._get_client_option('glance', 'region_name') - glanceclient_version = self._get_client_option('glance', 'api_version') - endpoint = self.url_for(service_type='image', - interface=endpoint_type, - region_name=region_name) - args = { - 'endpoint': endpoint, - 'auth_url': self.auth_url, - 'token': self.auth_token, - 'username': None, - 'password': None, - 'cacert': self._get_client_option('glance', 'ca_file'), - 'cert': self._get_client_option('glance', 'cert_file'), - 'key': self._get_client_option('glance', 'key_file'), - 'insecure': self._get_client_option('glance', 'insecure') - } - self._glance = glanceclient.Client(glanceclient_version, **args) - - return self._glance - - @exception.wrap_keystone_exception - def barbican(self): - if self._barbican: - return self._barbican - - endpoint_type = self._get_client_option('barbican', 'endpoint_type') - region_name = self._get_client_option('barbican', 'region_name') - endpoint = self.url_for(service_type='key-manager', - interface=endpoint_type, - region_name=region_name) - session = self.keystone().session - self._barbican = barbicanclient.Client(session=session, - endpoint=endpoint) - - return self._barbican - - @exception.wrap_keystone_exception - def nova(self): - if self._nova: - return self._nova - endpoint_type = self._get_client_option('nova', 'endpoint_type') - region_name = self._get_client_option('nova', 'region_name') - novaclient_version = self._get_client_option('nova', 'api_version') - endpoint = self.url_for(service_type='compute', - interface=endpoint_type, - region_name=region_name) - args = { - 'cacert': self._get_client_option('nova', 'ca_file'), - 'insecure': self._get_client_option('nova', 'insecure') - } - - session = self.keystone().session - self._nova = novaclient.Client(novaclient_version, - session=session, - endpoint_override=endpoint, **args) - return self._nova - - @exception.wrap_keystone_exception - def neutron(self): - if self._neutron: - return self._neutron - endpoint_type = self._get_client_option('neutron', 'endpoint_type') - region_name = self._get_client_option('neutron', 'region_name') - endpoint = self.url_for(service_type='network', - interface=endpoint_type, - region_name=region_name) - - args = { - 'auth_url': self.auth_url, - 'token': self.auth_token, - 'endpoint_url': endpoint, - 'endpoint_type': endpoint_type, - 'ca_cert': self._get_client_option('neutron', 'ca_file'), - 'insecure': self._get_client_option('neutron', 'insecure') - } 
- self._neutron = neutronclient.Client(**args) - return self._neutron diff --git a/magnum/common/config.py b/magnum/common/config.py deleted file mode 100644 index c8464533..00000000 --- a/magnum/common/config.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_middleware import cors - -from magnum.common import rpc -import magnum.conf -from magnum import version - -CONF = magnum.conf.CONF - - -def parse_args(argv, default_config_files=None): - rpc.set_defaults(control_exchange='magnum') - CONF(argv[1:], - project='magnum', - version=version.version_info.release_string(), - default_config_files=default_config_files) - rpc.init(CONF) - - -def set_config_defaults(): - """This method updates all configuration default values.""" - set_cors_middleware_defaults() - - -def set_cors_middleware_defaults(): - """Update default configuration options for oslo.middleware.""" - cors.set_defaults( - allow_headers=['X-Auth-Token', - 'X-Identity-Status', - 'X-Roles', - 'X-Service-Catalog', - 'X-User-Id', - 'X-Tenant-Id', - 'X-OpenStack-Request-ID', - 'X-Server-Management-Url'], - expose_headers=['X-Auth-Token', - 'X-Subject-Token', - 'X-Service-Token', - 'X-OpenStack-Request-ID', - 'X-Server-Management-Url'], - allow_methods=['GET', - 'PUT', - 'POST', - 'DELETE', - 'PATCH'] - ) diff --git a/magnum/common/context.py b/magnum/common/context.py deleted file mode 100644 index dae1afd7..00000000 --- a/magnum/common/context.py +++ /dev/null @@ -1,160 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from eventlet.green import threading -from oslo_context import context - -from magnum.common import policy - -import magnum.conf - -CONF = magnum.conf.CONF - - -class RequestContext(context.RequestContext): - """Extends security contexts from the OpenStack common library.""" - - def __init__(self, auth_token=None, auth_url=None, domain_id=None, - domain_name=None, user_name=None, user_id=None, - user_domain_name=None, user_domain_id=None, - project_name=None, project_id=None, roles=None, - is_admin=None, read_only=False, show_deleted=False, - request_id=None, trust_id=None, auth_token_info=None, - all_tenants=False, password=None, **kwargs): - """Stores several additional request parameters: - - :param domain_id: The ID of the domain. - :param domain_name: The name of the domain. 
- :param user_domain_id: The ID of the domain to - authenticate a user against. - :param user_domain_name: The name of the domain to - authenticate a user against. - - """ - super(RequestContext, self).__init__(auth_token=auth_token, - user=user_name, - tenant=project_name, - is_admin=is_admin, - read_only=read_only, - show_deleted=show_deleted, - request_id=request_id, - roles=roles) - - self.user_name = user_name - self.user_id = user_id - self.project_name = project_name - self.project_id = project_id - self.domain_id = domain_id - self.domain_name = domain_name - self.user_domain_id = user_domain_id - self.user_domain_name = user_domain_name - self.auth_url = auth_url - self.auth_token_info = auth_token_info - self.trust_id = trust_id - self.all_tenants = all_tenants - self.password = password - if is_admin is None: - self.is_admin = policy.check_is_admin(self) - else: - self.is_admin = is_admin - - def to_dict(self): - value = super(RequestContext, self).to_dict() - value.update({'auth_token': self.auth_token, - 'auth_url': self.auth_url, - 'domain_id': self.domain_id, - 'domain_name': self.domain_name, - 'user_domain_id': self.user_domain_id, - 'user_domain_name': self.user_domain_name, - 'user_name': self.user_name, - 'user_id': self.user_id, - 'project_name': self.project_name, - 'project_id': self.project_id, - 'is_admin': self.is_admin, - 'read_only': self.read_only, - 'roles': self.roles, - 'show_deleted': self.show_deleted, - 'request_id': self.request_id, - 'trust_id': self.trust_id, - 'auth_token_info': self.auth_token_info, - 'password': self.password, - 'all_tenants': self.all_tenants}) - return value - - @classmethod - def from_dict(cls, values): - return cls(**values) - - -def make_context(*args, **kwargs): - return RequestContext(*args, **kwargs) - - -def make_admin_context(show_deleted=False, all_tenants=False): - """Create an administrator context. - - :param show_deleted: if True, will show deleted items when query db - """ - context = RequestContext(user_id=None, - project=None, - is_admin=True, - show_deleted=show_deleted, - all_tenants=all_tenants) - return context - - -def make_cluster_context(cluster, show_deleted=False): - """Create a user context based on a cluster's stored Keystone trust. - - :param cluster: the cluster supplying the Keystone trust to use - :param show_deleted: if True, will show deleted items when query db - """ - context = RequestContext(user_name=cluster.trustee_username, - password=cluster.trustee_password, - trust_id=cluster.trust_id, - show_deleted=show_deleted, - user_domain_id=CONF.trust.trustee_domain_id, - user_domain_name=CONF.trust.trustee_domain_name) - return context - - -_CTX_STORE = threading.local() -_CTX_KEY = 'current_ctx' - - -def has_ctx(): - return hasattr(_CTX_STORE, _CTX_KEY) - - -def ctx(): - return getattr(_CTX_STORE, _CTX_KEY) - - -def set_ctx(new_ctx): - if not new_ctx and has_ctx(): - delattr(_CTX_STORE, _CTX_KEY) - if hasattr(context._request_store, 'context'): - delattr(context._request_store, 'context') - - if new_ctx: - setattr(_CTX_STORE, _CTX_KEY, new_ctx) - setattr(context._request_store, 'context', new_ctx) - - -def get_admin_context(read_deleted="no"): - # NOTE(tovin07): This method should only be used when an admin context is - # necessary for the entirety of the context lifetime. 
- return RequestContext(user_id=None, - project_id=None, - is_admin=True, - read_deleted=read_deleted, - overwrite=False) diff --git a/magnum/common/docker_utils.py b/magnum/common/docker_utils.py deleted file mode 100644 index 5d93449c..00000000 --- a/magnum/common/docker_utils.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2015 Rackspace All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import contextlib - -import docker -from docker.utils import utils - -from magnum.conductor.handlers.common import cert_manager -from magnum.conductor import utils as conductor_utils -import magnum.conf - - -CONF = magnum.conf.CONF - - -def parse_docker_image(image): - image_parts = image.split(':', 1) - - image_repo = image_parts[0] - image_tag = None - - if len(image_parts) > 1: - image_tag = image_parts[1] - - return image_repo, image_tag - - -def is_docker_library_version_atleast(version): - if utils.compare_version(docker.version, version) <= 0: - return True - return False - - -def is_docker_api_version_atleast(docker, version): - if utils.compare_version(docker.version()['ApiVersion'], version) <= 0: - return True - return False - - -@contextlib.contextmanager -def docker_for_cluster(context, cluster): - cluster_template = conductor_utils.retrieve_cluster_template( - context, cluster) - - ca_cert, magnum_key, magnum_cert = None, None, None - client_kwargs = dict() - if not cluster_template.tls_disabled: - (ca_cert, magnum_key, - magnum_cert) = cert_manager.create_client_files(cluster, context) - client_kwargs['ca_cert'] = ca_cert.name - client_kwargs['client_key'] = magnum_key.name - client_kwargs['client_cert'] = magnum_cert.name - - yield DockerHTTPClient( - cluster.api_address, - CONF.docker.docker_remote_api_version, - CONF.docker.default_timeout, - **client_kwargs - ) - - if ca_cert: - ca_cert.close() - if magnum_key: - magnum_key.close() - if magnum_cert: - magnum_cert.close() - - -class DockerHTTPClient(docker.APIClient): - def __init__(self, url='unix://var/run/docker.sock', - ver=CONF.docker.docker_remote_api_version, - timeout=CONF.docker.default_timeout, - ca_cert=None, - client_key=None, - client_cert=None): - - if ca_cert and client_key and client_cert: - ssl_config = docker.tls.TLSConfig( - client_cert=(client_cert, client_key), - verify=ca_cert, - assert_hostname=False, - ) - else: - ssl_config = False - - super(DockerHTTPClient, self).__init__( - base_url=url, - version=ver, - timeout=timeout, - tls=ssl_config - ) - - def list_instances(self, inspect=False): - res = [] - for container in self.containers(all=True): - info = self.inspect_container(container['Id']) - if not info: - continue - if inspect: - res.append(info) - else: - res.append(info['Config'].get('Hostname')) - return res diff --git a/magnum/common/exception.py b/magnum/common/exception.py deleted file mode 100755 index 464f0ba5..00000000 --- a/magnum/common/exception.py +++ /dev/null @@ -1,376 +0,0 @@ -# Copyright 2013 - Red Hat, Inc. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Magnum base exception handling.
-
-Includes a decorator for re-raising Magnum-type exceptions.
-
-"""
-
-import functools
-import sys
-
-from keystoneclient import exceptions as keystone_exceptions
-from oslo_config import cfg
-from oslo_log import log as logging
-import six
-
-import magnum.conf
-from magnum.i18n import _
-
-
-LOG = logging.getLogger(__name__)
-
-CONF = magnum.conf.CONF
-
-try:
-    CONF.import_opt('fatal_exception_format_errors',
-                    'oslo_versionedobjects.exception')
-except cfg.NoSuchOptError:
-    # Note: workaround for magnum running against the master branch in the
-    # devstack gate job. Magnum has not been branched yet, and
-    # oslo.versionedobjects moved this option to a different import group
-    # between kilo and master, so the import above can fail depending on
-    # the installed version. Register under the new group as a fallback to
-    # prevent gate failures. Bug: #1447873
-    CONF.import_opt('fatal_exception_format_errors',
-                    'oslo_versionedobjects.exception',
-                    group='oslo_versionedobjects')
-
-
-def wrap_keystone_exception(func):
-    """Wrap keystone exceptions and throw Magnum specific exceptions."""
-    @functools.wraps(func)
-    def wrapped(*args, **kw):
-        try:
-            return func(*args, **kw)
-        except keystone_exceptions.AuthorizationFailure:
-            raise AuthorizationFailure(
-                client=func.__name__, message="reason: %s" % sys.exc_info()[1])
-        except keystone_exceptions.ClientException:
-            raise AuthorizationFailure(
-                client=func.__name__,
-                message="unexpected keystone client error occurred: %s"
-                        % sys.exc_info()[1])
-    return wrapped
-
-
-class MagnumException(Exception):
-    """Base Magnum Exception
-
-    To correctly use this class, inherit from it and define
-    a 'message' property. That message will get printf'd
-    with the keyword arguments provided to the constructor.
- - """ - message = _("An unknown exception occurred.") - code = 500 - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - - if 'code' not in self.kwargs and hasattr(self, 'code'): - self.kwargs['code'] = self.code - - if message: - self.message = message - - try: - self.message = self.message % kwargs - except Exception: - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception('Exception in string format operation, ' - 'kwargs: %s', kwargs) - try: - if CONF.fatal_exception_format_errors: - raise - except cfg.NoSuchOptError: - # Note: work around for Bug: #1447873 - if CONF.oslo_versionedobjects.fatal_exception_format_errors: - raise - - super(MagnumException, self).__init__(self.message) - - def __str__(self): - if six.PY3: - return self.message - return self.message.encode('utf-8') - - def __unicode__(self): - return self.message - - def format_message(self): - if self.__class__.__name__.endswith('_Remote'): - return self.args[0] - else: - return six.text_type(self) - - -class ObjectNotFound(MagnumException): - message = _("The %(name)s %(id)s could not be found.") - code = 404 - - -class ProjectNotFound(ObjectNotFound): - message = _("The %(name)s %(id)s could not be found.") - - -class ResourceNotFound(ObjectNotFound): - message = _("The %(name)s resource %(id)s could not be found.") - - -class AuthorizationFailure(MagnumException): - message = _("%(client)s connection failed. %(message)s") - code = 403 - - -class Invalid(MagnumException): - message = _("Unacceptable parameters.") - code = 400 - - -class InvalidUUID(Invalid): - message = _("Expected a uuid but received %(uuid)s.") - - -class InvalidName(Invalid): - message = _("Expected a name but received %(name)s.") - - -class InvalidDiscoveryURL(Invalid): - message = _("Received invalid discovery URL '%(discovery_url)s' for " - "discovery endpoint '%(discovery_endpoint)s'.") - - -class GetDiscoveryUrlFailed(MagnumException): - message = _("Failed to get discovery url from '%(discovery_endpoint)s'.") - - -class InvalidClusterDiscoveryURL(Invalid): - message = _("Invalid discovery URL '%(discovery_url)s'.") - - -class InvalidClusterSize(Invalid): - message = _("Expected cluster size %(expect_size)d but get cluster " - "size %(size)d from '%(discovery_url)s'.") - - -class GetClusterSizeFailed(MagnumException): - message = _("Failed to get the size of cluster from '%(discovery_url)s'.") - - -class InvalidIdentity(Invalid): - message = _("Expected an uuid or int but received %(identity)s.") - - -class InvalidCsr(Invalid): - message = _("Received invalid csr %(csr)s.") - - -class InvalidSubnet(Invalid): - message = _("Received invalid subnet %(subnet)s.") - - -class HTTPNotFound(ResourceNotFound): - pass - - -class Conflict(MagnumException): - message = _('Conflict.') - code = 409 - - -class ApiVersionsIntersect(Invalid): - message = _("Version of %(name)s %(min_ver)s %(max_ver)s intersects " - "with another versions.") - - -# Cannot be templated as the error syntax varies. -# msg needs to be constructed when raised. -class InvalidParameterValue(Invalid): - message = _("%(err)s") - - -class PatchError(Invalid): - message = _("Couldn't apply patch '%(patch)s'. 
Reason: %(reason)s") - - -class NotAuthorized(MagnumException): - message = _("Not authorized.") - code = 403 - - -class PolicyNotAuthorized(NotAuthorized): - message = _("Policy doesn't allow %(action)s to be performed.") - - -class InvalidMAC(Invalid): - message = _("Expected a MAC address but received %(mac)s.") - - -class ConfigInvalid(Invalid): - message = _("Invalid configuration file. %(error_msg)s") - - -class ClusterTemplateNotFound(ResourceNotFound): - message = _("ClusterTemplate %(clustertemplate)s could not be found.") - - -class ClusterTemplateAlreadyExists(Conflict): - message = _("A ClusterTemplate with UUID %(uuid)s already exists.") - - -class ClusterTemplateReferenced(Invalid): - message = _("ClusterTemplate %(clustertemplate)s is referenced by one or" - " multiple clusters.") - - -class ClusterTemplatePublishDenied(NotAuthorized): - message = _("Not authorized to set public flag for cluster template.") - - -class ClusterNotFound(ResourceNotFound): - message = _("Cluster %(cluster)s could not be found.") - - -class ClusterAlreadyExists(Conflict): - message = _("A cluster with UUID %(uuid)s already exists.") - - -class NotSupported(MagnumException): - message = _("%(operation)s is not supported.") - code = 400 - - -class ClusterTypeNotSupported(NotSupported): - message = _("Cluster type (%(server_type)s, %(os)s, %(coe)s)" - " not supported.") - - -class RequiredParameterNotProvided(Invalid): - message = _("Required parameter %(heat_param)s not provided.") - - -class OperationInProgress(Invalid): - message = _("Cluster %(cluster_name)s already has an operation in " - "progress.") - - -class ImageNotFound(ResourceNotFound): - """The code here changed to 400 according to the latest document.""" - message = _("Image %(image_id)s could not be found.") - code = 400 - - -class ImageNotAuthorized(NotAuthorized): - message = _("Not authorized for image %(image_id)s.") - - -class OSDistroFieldNotFound(ResourceNotFound): - """The code here changed to 400 according to the latest document.""" - message = _("Image %(image_id)s doesn't contain os_distro field.") - code = 400 - - -class X509KeyPairNotFound(ResourceNotFound): - message = _("A key pair %(x509keypair)s could not be found.") - - -class X509KeyPairAlreadyExists(Conflict): - message = _("A key pair with UUID %(uuid)s already exists.") - - -class CertificateStorageException(MagnumException): - message = _("Could not store certificate: %(msg)s") - - -class CertificateValidationError(Invalid): - message = _("Extension '%(extension)s' not allowed") - - -class KeyPairNotFound(ResourceNotFound): - message = _("Unable to find keypair %(keypair)s.") - - -class MagnumServiceNotFound(ResourceNotFound): - message = _("A magnum service %(magnum_service_id)s could not be found.") - - -class MagnumServiceAlreadyExists(Conflict): - message = _("A magnum service with ID %(id)s already exists.") - - -class UnsupportedK8sQuantityFormat(Invalid): - message = _("Unsupported quantity format for k8s cluster.") - - -class UnsupportedDockerQuantityFormat(Invalid): - message = _("Unsupported quantity format for Swarm cluster.") - - -class FlavorNotFound(ResourceNotFound): - """The code here changed to 400 according to the latest document.""" - message = _("Unable to find flavor %(flavor)s.") - code = 400 - - -class ExternalNetworkNotFound(ResourceNotFound): - """The code here changed to 400 according to the latest document.""" - """"Ensure the network is not private.""" - message = _("Unable to find external network %(network)s.") - code = 400 - - 
-class TrustCreateFailed(MagnumException): - message = _("Failed to create trust for trustee %(trustee_user_id)s.") - - -class TrustDeleteFailed(MagnumException): - message = _("Failed to delete trust %(trust_id)s.") - - -class TrusteeCreateFailed(MagnumException): - message = _("Failed to create trustee %(username)s " - "in domain %(domain_id)s") - - -class TrusteeDeleteFailed(MagnumException): - message = _("Failed to delete trustee %(trustee_id)s") - - -class QuotaAlreadyExists(Conflict): - message = _("Quota for project %(project_id)s already exists " - "for resource %(resource)s.") - - -class QuotaNotFound(ResourceNotFound): - message = _("Quota could not be found: %(msg)s") - - -class ResourceLimitExceeded(NotAuthorized): - message = _('Resource limit exceeded: %(msg)s') - - -class RegionsListFailed(MagnumException): - message = _("Failed to list regions.") - - -class TrusteeOrTrustToClusterFailed(MagnumException): - message = _("Failed to create trustee or trust for Cluster: " - "%(cluster_uuid)s") - - -class CertificatesToClusterFailed(MagnumException): - message = _("Failed to create certificates for Cluster: %(cluster_uuid)s") diff --git a/magnum/common/keystone.py b/magnum/common/keystone.py deleted file mode 100755 index b22a8cd0..00000000 --- a/magnum/common/keystone.py +++ /dev/null @@ -1,293 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from keystoneauth1.access import access as ka_access -from keystoneauth1 import exceptions as ka_exception -from keystoneauth1.identity import access as ka_access_plugin -from keystoneauth1.identity import v3 as ka_v3 -from keystoneauth1 import loading as ka_loading -import keystoneclient.exceptions as kc_exception -from keystoneclient.v3 import client as kc_v3 -from oslo_log import log as logging - -from magnum.common import exception -import magnum.conf -from magnum.conf import keystone as ksconf -from magnum.i18n import _ - -CONF = magnum.conf.CONF -LOG = logging.getLogger(__name__) - - -class KeystoneClientV3(object): - """Keystone client wrapper so we can encapsulate logic in one place.""" - - def __init__(self, context): - self.context = context - self._client = None - self._domain_admin_auth = None - self._domain_admin_session = None - self._domain_admin_client = None - self._trustee_domain_id = None - self._session = None - - @property - def auth_url(self): - # FIXME(pauloewerton): auth_url should be retrieved from keystone_auth - # section by default - return CONF[ksconf.CFG_LEGACY_GROUP].auth_uri.replace('v2.0', 'v3') - - @property - def auth_token(self): - return self.session.get_token() - - @property - def session(self): - if self._session: - return self._session - auth = self._get_auth() - session = self._get_session(auth) - self._session = session - return session - - def _get_session(self, auth): - session = ka_loading.load_session_from_conf_options( - CONF, ksconf.CFG_GROUP, auth=auth) - return session - - def _get_auth(self): - if self.context.auth_token_info: - access_info = ka_access.create(body=self.context.auth_token_info, - auth_token=self.context.auth_token) - auth = ka_access_plugin.AccessInfoPlugin(access_info) - elif self.context.auth_token: - auth = ka_v3.Token(auth_url=self.auth_url, - token=self.context.auth_token) - elif self.context.trust_id: - auth_info = { - 'auth_url': self.auth_url, - 'username': self.context.user_name, - 'password': self.context.password, - 'user_domain_id': self.context.user_domain_id, - 'user_domain_name': self.context.user_domain_name, - 'trust_id': self.context.trust_id - } - - auth = ka_v3.Password(**auth_info) - elif self.context.is_admin: - try: - auth = ka_loading.load_auth_from_conf_options( - CONF, ksconf.CFG_GROUP) - except ka_exception.MissingRequiredOptions: - auth = self._get_legacy_auth() - else: - msg = ('Keystone API connection failed: no password, ' - 'trust_id or token found.') - LOG.error(msg) - raise exception.AuthorizationFailure(client='keystone', - message='reason %s' % msg) - - return auth - - def _get_legacy_auth(self): - LOG.warning('Auth plugin and its options for service user ' - 'must be provided in [%(new)s] section. ' - 'Using values from [%(old)s] section is ' - 'deprecated.', {'new': ksconf.CFG_GROUP, - 'old': ksconf.CFG_LEGACY_GROUP}) - - conf = getattr(CONF, ksconf.CFG_LEGACY_GROUP) - - # FIXME(htruta, pauloewerton): Conductor layer does not have - # new v3 variables, such as project_name and project_domain_id. - # The use of admin_* variables is related to Identity API v2.0, - # which is now deprecated. We should also stop using hard-coded - # domain info, as well as variables that refer to `tenant`, - # as they are also v2 related. 
- auth = ka_v3.Password(auth_url=self.auth_url, - username=conf.admin_user, - password=conf.admin_password, - project_name=conf.admin_tenant_name, - project_domain_id='default', - user_domain_id='default') - return auth - - @property - def client(self): - if self._client: - return self._client - client = kc_v3.Client(session=self.session, - trust_id=self.context.trust_id) - self._client = client - return client - - @property - def domain_admin_auth(self): - user_domain_id = ( - CONF.trust.trustee_domain_admin_domain_id or - CONF.trust.trustee_domain_id - ) - user_domain_name = ( - CONF.trust.trustee_domain_admin_domain_name or - CONF.trust.trustee_domain_name - ) - if not self._domain_admin_auth: - self._domain_admin_auth = ka_v3.Password( - auth_url=self.auth_url, - user_id=CONF.trust.trustee_domain_admin_id, - username=CONF.trust.trustee_domain_admin_name, - user_domain_id=user_domain_id, - user_domain_name=user_domain_name, - domain_id=CONF.trust.trustee_domain_id, - domain_name=CONF.trust.trustee_domain_name, - password=CONF.trust.trustee_domain_admin_password) - return self._domain_admin_auth - - @property - def domain_admin_session(self): - if not self._domain_admin_session: - session = ka_loading.session.Session().load_from_options( - auth=self.domain_admin_auth, - insecure=CONF[ksconf.CFG_LEGACY_GROUP].insecure, - cacert=CONF[ksconf.CFG_LEGACY_GROUP].cafile, - key=CONF[ksconf.CFG_LEGACY_GROUP].keyfile, - cert=CONF[ksconf.CFG_LEGACY_GROUP].certfile) - self._domain_admin_session = session - return self._domain_admin_session - - @property - def domain_admin_client(self): - if not self._domain_admin_client: - self._domain_admin_client = kc_v3.Client( - session=self.domain_admin_session - ) - return self._domain_admin_client - - @property - def trustee_domain_id(self): - if not self._trustee_domain_id: - try: - access = self.domain_admin_auth.get_access( - self.domain_admin_session - ) - except kc_exception.Unauthorized: - msg = "Keystone client authentication failed" - LOG.error(msg) - raise exception.AuthorizationFailure(client='keystone', - message='reason: %s' % - msg) - - self._trustee_domain_id = access.domain_id - - return self._trustee_domain_id - - def create_trust(self, trustee_user): - trustor_user_id = self.session.get_user_id() - trustor_project_id = self.session.get_project_id() - - # inherit the role of the trustor, unless set CONF.trust.roles - if CONF.trust.roles: - roles = CONF.trust.roles - else: - roles = self.context.roles - - try: - trust = self.client.trusts.create( - trustor_user=trustor_user_id, - project=trustor_project_id, - trustee_user=trustee_user, - impersonation=True, - delegation_depth=0, - role_names=roles) - except Exception: - LOG.exception('Failed to create trust') - raise exception.TrustCreateFailed( - trustee_user_id=trustee_user) - return trust - - def delete_trust(self, context, cluster): - if cluster.trust_id is None: - return - - # Trust can only be deleted by the user who creates it. So when - # other users in the same project want to delete the cluster, we need - # use the trustee which can impersonate the trustor to delete the - # trust. 
-        if context.user_id == cluster.user_id:
-            client = self.client
-        else:
-            auth = ka_v3.Password(auth_url=self.auth_url,
-                                  user_id=cluster.trustee_user_id,
-                                  password=cluster.trustee_password,
-                                  trust_id=cluster.trust_id)
-
-            sess = ka_loading.session.Session().load_from_options(
-                auth=auth,
-                insecure=CONF[ksconf.CFG_LEGACY_GROUP].insecure,
-                cacert=CONF[ksconf.CFG_LEGACY_GROUP].cafile,
-                key=CONF[ksconf.CFG_LEGACY_GROUP].keyfile,
-                cert=CONF[ksconf.CFG_LEGACY_GROUP].certfile)
-            client = kc_v3.Client(session=sess)
-        try:
-            client.trusts.delete(cluster.trust_id)
-        except kc_exception.NotFound:
-            pass
-        except Exception:
-            LOG.exception('Failed to delete trust')
-            raise exception.TrustDeleteFailed(trust_id=cluster.trust_id)
-
-    def create_trustee(self, username, password):
-        domain_id = self.trustee_domain_id
-        try:
-            user = self.domain_admin_client.users.create(
-                name=username,
-                password=password,
-                domain=domain_id)
-        except Exception:
-            LOG.exception('Failed to create trustee')
-            raise exception.TrusteeCreateFailed(username=username,
-                                                domain_id=domain_id)
-        return user
-
-    def delete_trustee(self, trustee_id):
-        try:
-            self.domain_admin_client.users.delete(trustee_id)
-        except kc_exception.NotFound:
-            pass
-        except Exception:
-            LOG.exception('Failed to delete trustee')
-            raise exception.TrusteeDeleteFailed(trustee_id=trustee_id)
-
-    def get_validate_region_name(self, region_name):
-        """Matches the region of a public endpoint for the Keystone
-        service."""
-        if region_name is None:
-            message = _("region_name needs to be configured in magnum.conf")
-            raise exception.InvalidParameterValue(message)
-        try:
-            regions = self.client.regions.list()
-        except kc_exception.NotFound:
-            # No region list is available; fall through with an empty list
-            # instead of leaving `regions` unbound below.
-            regions = []
-        except Exception:
-            LOG.exception('Failed to list regions')
-            raise exception.RegionsListFailed()
-        region_list = []
-        for region in regions:
-            region_list.append(region.id)
-        if region_name not in region_list:
-            raise exception.InvalidParameterValue(_(
-                'region_name %(region_name)s is invalid, '
-                'expecting a region_name in %(region_name_list)s.') % {
-                    'region_name': region_name,
-                    'region_name_list': '/'.join(
-                        region_list + ['unspecified'])})
-        return region_name
diff --git a/magnum/common/name_generator.py b/magnum/common/name_generator.py
deleted file mode 100644
index 1847cd60..00000000
--- a/magnum/common/name_generator.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2016 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import random
-
-
-class NameGenerator(object):
-    letters = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta',
-               'eta', 'theta', 'iota', 'kappa', 'lambda', 'mu', 'nu',
-               'xi', 'omicron', 'pi', 'rho', 'sigma', 'tau', 'upsilon',
-               'phi', 'chi', 'psi', 'omega']
-
-    def __init__(self):
-        self.random = random.Random()
-
-    def generate(self):
-        '''Generate a random name composed of a Greek letter and
-
-        a number, like: beta-2.
- ''' - - letter = self.random.choice(self.letters) - number = self.random.randint(1, 24) - - return letter + '-' + str(number) diff --git a/magnum/common/policy.py b/magnum/common/policy.py deleted file mode 100644 index 74d9fb3f..00000000 --- a/magnum/common/policy.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Policy Engine For magnum.""" - -import decorator -from oslo_config import cfg -from oslo_policy import policy -from oslo_utils import importutils -import pecan - -from magnum.common import clients -from magnum.common import exception - - -_ENFORCER = None -CONF = cfg.CONF - - -# we can get a policy enforcer by this init. -# oslo policy support change policy rule dynamically. -# at present, policy.enforce will reload the policy rules when it checks -# the policy files have been touched. -def init(policy_file=None, rules=None, - default_rule=None, use_conf=True, overwrite=True): - """Init an Enforcer class. - - :param policy_file: Custom policy file to use, if none is - specified, ``conf.policy_file`` will be - used. - :param rules: Default dictionary / Rules to use. It will be - considered just in the first instantiation. If - :meth:`load_rules` with ``force_reload=True``, - :meth:`clear` or :meth:`set_rules` with - ``overwrite=True`` is called this will be overwritten. - :param default_rule: Default rule to use, conf.default_rule will - be used if none is specified. - :param use_conf: Whether to load rules from cache or config file. - :param overwrite: Whether to overwrite existing rules when reload rules - from config file. - """ - global _ENFORCER - if not _ENFORCER: - # http://docs.openstack.org/developer/oslo.policy/usage.html - _ENFORCER = policy.Enforcer(CONF, - policy_file=policy_file, - rules=rules, - default_rule=default_rule, - use_conf=use_conf, - overwrite=overwrite) - return _ENFORCER - - -def enforce(context, rule=None, target=None, - do_raise=True, exc=None, *args, **kwargs): - - """Checks authorization of a rule against the target and credentials. - - :param dict context: As much information about the user performing the - action as possible. - :param rule: The rule to evaluate. - :param dict target: As much information about the object being operated - on as possible. - :param do_raise: Whether to raise an exception or not if check - fails. - :param exc: Class of the exception to raise if the check fails. - Any remaining arguments passed to :meth:`enforce` (both - positional and keyword arguments) will be passed to - the exception class. If not specified, - :class:`PolicyNotAuthorized` will be used. - - :return: ``False`` if the policy does not allow the action and `exc` is - not provided; otherwise, returns a value that evaluates to - ``True``. Note: for rules using the "case" expression, this - ``True`` value will be the specified string from the - expression. 
- """ - enforcer = init() - credentials = context.to_dict() - if not exc: - exc = exception.PolicyNotAuthorized - if target is None: - target = {'project_id': context.project_id, - 'user_id': context.user_id} - add_policy_attributes(target) - return enforcer.enforce(rule, target, credentials, - do_raise=do_raise, exc=exc, *args, **kwargs) - - -def add_policy_attributes(target): - """Adds extra information for policy enforcement to raw target object""" - context = importutils.import_module('magnum.common.context') - admin_context = context.make_admin_context() - admin_osc = clients.OpenStackClients(admin_context) - trustee_domain_id = admin_osc.keystone().trustee_domain_id - target['trustee_domain_id'] = trustee_domain_id - return target - - -def check_is_admin(context): - """Whether or not user is admin according to policy setting. - - """ - init() - target = {} - credentials = context.to_dict() - return _ENFORCER.enforce('context_is_admin', target, credentials) - - -def enforce_wsgi(api_name, act=None): - """This is a decorator to simplify wsgi action policy rule check. - - :param api_name: The collection name to be evaluate. - :param act: The function name of wsgi action. - - example: - from magnum.common import policy - class ClustersController(rest.RestController): - .... - @policy.enforce_wsgi("cluster", "delete") - @wsme_pecan.wsexpose(None, types.uuid_or_name, status_code=204) - def delete(self, cluster_ident): - ... - """ - @decorator.decorator - def wrapper(fn, *args, **kwargs): - action = "%s:%s" % (api_name, (act or fn.__name__)) - enforce(pecan.request.context, action, - exc=exception.PolicyNotAuthorized, action=action) - return fn(*args, **kwargs) - return wrapper diff --git a/magnum/common/profiler.py b/magnum/common/profiler.py deleted file mode 100644 index 5e1b19eb..00000000 --- a/magnum/common/profiler.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2017 Fujitsu Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -### -# This code is taken from nova. Goal is minimal modification. 
-### - -from oslo_log import log as logging -from oslo_utils import importutils -import webob.dec - -from magnum.common import context -import magnum.conf - -profiler = importutils.try_import("osprofiler.profiler") -profiler_initializer = importutils.try_import("osprofiler.initializer") -profiler_web = importutils.try_import("osprofiler.web") - - -CONF = magnum.conf.CONF - -LOG = logging.getLogger(__name__) - - -class WsgiMiddleware(object): - - def __init__(self, application, **kwargs): - self.application = application - - @classmethod - def factory(cls, global_conf, **local_conf): - if profiler_web: - return profiler_web.WsgiMiddleware.factory(global_conf, - **local_conf) - - def filter_(app): - return cls(app, **local_conf) - - return filter_ - - @webob.dec.wsgify - def __call__(self, request): - return request.get_response(self.application) - - -def setup(binary, host): - if hasattr(CONF, 'profiler') and CONF.profiler.enabled: - profiler_initializer.init_from_conf( - conf=CONF, - context=context.get_admin_context().to_dict(), - project="magnum", - service=binary, - host=host) - LOG.info("OSprofiler is enabled.") - - -def trace_cls(name, **kwargs): - """Wrap the OSprofiler trace_cls. - - Wrap the OSprofiler trace_cls decorator so that it will not try to - patch the class unless OSprofiler is present. - - :param name: The name of action. For example, wsgi, rpc, db, ... - :param kwargs: Any other keyword args used by profiler.trace_cls - """ - - def decorator(cls): - if profiler and 'profiler' in CONF: - trace_decorator = profiler.trace_cls(name, kwargs) - return trace_decorator(cls) - return cls - - return decorator diff --git a/magnum/common/rpc.py b/magnum/common/rpc.py deleted file mode 100644 index d3ff33b3..00000000 --- a/magnum/common/rpc.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
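The deleted magnum/common/rpc.py below wires up the oslo.messaging transport, serializers, and notifier for the service. A minimal wiring sketch, assuming a parsed oslo.config CONF object and an illustrative ConductorHandler endpoint class, neither of which is part of this module:

    import oslo_messaging as messaging
    from magnum.common import rpc

    rpc.init(CONF)  # one-time setup: builds the module-level TRANSPORT/NOTIFIER

    # server side: expose endpoint objects on a topic (names illustrative)
    target = messaging.Target(topic='magnum-conductor', server='host-1')
    server = rpc.get_server(target, endpoints=[ConductorHandler()])
    server.start()

    # client side: call into the same topic
    client = rpc.get_client(messaging.Target(topic='magnum-conductor'))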
- -__all__ = [ - 'init', - 'cleanup', - 'set_defaults', - 'add_extra_exmods', - 'clear_extra_exmods', - 'get_allowed_exmods', - 'RequestContextSerializer', - 'get_client', - 'get_server', - 'get_notifier', -] - -import socket - - -import oslo_messaging as messaging -from oslo_messaging.rpc import dispatcher -from oslo_serialization import jsonutils -from oslo_utils import importutils - -from magnum.common import context as magnum_context -from magnum.common import exception -import magnum.conf - -profiler = importutils.try_import("osprofiler.profiler") - -CONF = magnum.conf.CONF -TRANSPORT = None -NOTIFIER = None - -ALLOWED_EXMODS = [ - exception.__name__, -] -EXTRA_EXMODS = [] - - -def init(conf): - global TRANSPORT, NOTIFIER - exmods = get_allowed_exmods() - TRANSPORT = messaging.get_rpc_transport(conf, - allowed_remote_exmods=exmods) - serializer = RequestContextSerializer(JsonPayloadSerializer()) - NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer) - - -def cleanup(): - global TRANSPORT, NOTIFIER - assert TRANSPORT is not None - assert NOTIFIER is not None - TRANSPORT.cleanup() - TRANSPORT = NOTIFIER = None - - -def set_defaults(control_exchange): - messaging.set_transport_defaults(control_exchange) - - -def add_extra_exmods(*args): - EXTRA_EXMODS.extend(args) - - -def clear_extra_exmods(): - del EXTRA_EXMODS[:] - - -def get_allowed_exmods(): - return ALLOWED_EXMODS + EXTRA_EXMODS - - -class JsonPayloadSerializer(messaging.NoOpSerializer): - @staticmethod - def serialize_entity(context, entity): - return jsonutils.to_primitive(entity, convert_instances=True) - - -class RequestContextSerializer(messaging.Serializer): - - def __init__(self, base): - self._base = base - - def serialize_entity(self, context, entity): - if not self._base: - return entity - return self._base.serialize_entity(context, entity) - - def deserialize_entity(self, context, entity): - if not self._base: - return entity - return self._base.deserialize_entity(context, entity) - - def serialize_context(self, context): - return context.to_dict() - - def deserialize_context(self, context): - return magnum_context.RequestContext.from_dict(context) - - -class ProfilerRequestContextSerializer(RequestContextSerializer): - def serialize_context(self, context): - _context = super(ProfilerRequestContextSerializer, - self).serialize_context(context) - - prof = profiler.get() - if prof: - trace_info = { - "hmac_key": prof.hmac_key, - "base_id": prof.get_base_id(), - "parent_id": prof.get_id() - } - _context.update({"trace_info": trace_info}) - - return _context - - def deserialize_context(self, context): - trace_info = context.pop("trace_info", None) - if trace_info: - profiler.init(**trace_info) - - return super(ProfilerRequestContextSerializer, - self).deserialize_context(context) - - -def get_transport_url(url_str=None): - return messaging.TransportURL.parse(CONF, url_str) - - -def get_client(target, version_cap=None, serializer=None, timeout=None): - assert TRANSPORT is not None - if profiler: - serializer = ProfilerRequestContextSerializer(serializer) - else: - serializer = RequestContextSerializer(serializer) - - return messaging.RPCClient(TRANSPORT, - target, - version_cap=version_cap, - serializer=serializer, - timeout=timeout) - - -def get_server(target, endpoints, serializer=None): - assert TRANSPORT is not None - access_policy = dispatcher.DefaultRPCAccessPolicy - if profiler: - serializer = ProfilerRequestContextSerializer(serializer) - else: - serializer = RequestContextSerializer(serializer) - - return 
messaging.get_rpc_server(TRANSPORT, - target, - endpoints, - executor='eventlet', - serializer=serializer, - access_policy=access_policy) - - -def get_notifier(service='container-infra', host=None, publisher_id=None): - assert NOTIFIER is not None - myhost = CONF.host - if myhost is None: - myhost = socket.getfqdn() - if not publisher_id: - publisher_id = "%s.%s" % (service, host or myhost) - return NOTIFIER.prepare(publisher_id=publisher_id) diff --git a/magnum/common/rpc_service.py b/magnum/common/rpc_service.py deleted file mode 100644 index 4db1b2ed..00000000 --- a/magnum/common/rpc_service.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2014 - Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Common RPC service and API tools for Magnum.""" - -import oslo_messaging as messaging -from oslo_messaging.rpc import dispatcher -from oslo_service import service -from oslo_utils import importutils - -from magnum.common import profiler -from magnum.common import rpc -import magnum.conf -from magnum.objects import base as objects_base -from magnum.service import periodic -from magnum.servicegroup import magnum_service_periodic as servicegroup - - -osprofiler = importutils.try_import("osprofiler.profiler") - -CONF = magnum.conf.CONF - - -def _init_serializer(): - serializer = rpc.RequestContextSerializer( - objects_base.MagnumObjectSerializer()) - if osprofiler: - serializer = rpc.ProfilerRequestContextSerializer(serializer) - else: - serializer = rpc.RequestContextSerializer(serializer) - return serializer - - -class Service(service.Service): - - def __init__(self, topic, server, handlers, binary): - super(Service, self).__init__() - serializer = _init_serializer() - transport = messaging.get_rpc_transport(CONF) - # TODO(asalkeld) add support for version='x.y' - access_policy = dispatcher.DefaultRPCAccessPolicy - target = messaging.Target(topic=topic, server=server) - self._server = messaging.get_rpc_server(transport, target, handlers, - executor='eventlet', - serializer=serializer, - access_policy=access_policy) - self.binary = binary - profiler.setup(binary, CONF.host) - - def start(self): - # NOTE(suro-patz): The parent class has created a threadgroup, already - if CONF.periodic_enable: - periodic.setup(CONF, self.tg) - servicegroup.setup(CONF, self.binary, self.tg) - self._server.start() - - def stop(self): - if self._server: - self._server.stop() - self._server.wait() - super(Service, self).stop() - - @classmethod - def create(cls, topic, server, handlers, binary): - service_obj = cls(topic, server, handlers, binary) - return service_obj - - -class API(object): - def __init__(self, transport=None, context=None, topic=None, server=None, - timeout=None): - serializer = _init_serializer() - if transport is None: - exmods = rpc.get_allowed_exmods() - transport = messaging.get_rpc_transport( - CONF, allowed_remote_exmods=exmods) - self._context = context - if topic is None: - topic = '' - target = messaging.Target(topic=topic, server=server) - self._client = 
messaging.RPCClient(transport, target, - serializer=serializer, - timeout=timeout) - - def _call(self, method, *args, **kwargs): - return self._client.call(self._context, method, *args, **kwargs) - - def _cast(self, method, *args, **kwargs): - self._client.cast(self._context, method, *args, **kwargs) - - def echo(self, message): - self._cast('echo', message=message) diff --git a/magnum/common/service.py b/magnum/common/service.py deleted file mode 100644 index d92afdc5..00000000 --- a/magnum/common/service.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2013 - Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging - -from magnum.common import config -import magnum.conf - -CONF = magnum.conf.CONF - - -def prepare_service(argv=None): - if argv is None: - argv = [] - logging.register_options(CONF) - config.parse_args(argv) - config.set_config_defaults() - - logging.setup(CONF, 'magnum') diff --git a/magnum/common/short_id.py b/magnum/common/short_id.py deleted file mode 100644 index 1568259b..00000000 --- a/magnum/common/short_id.py +++ /dev/null @@ -1,62 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utilities for creating short ID strings based on a random UUID. - -The IDs each comprise 12 (lower-case) alphanumeric characters. -""" - -import base64 -import uuid - -import six - -from magnum.i18n import _ - - -def _to_byte_string(value, num_bits): - """Convert an integer to a big-endian string of bytes with padding. - - Padding is added at the end (i.e. after the least-significant bit) if - required. - """ - shifts = six.moves.xrange(num_bits - 8, -8, -8) - byte_at = lambda off: (value >> off if off >= 0 else value << -off) & 0xff - return ''.join(chr(byte_at(offset)) for offset in shifts) - - -def get_id(source_uuid): - """Derive a short (12 character) id from a random UUID. - - The supplied UUID must be a version 4 UUID object. 
- """ - if isinstance(source_uuid, six.string_types): - source_uuid = uuid.UUID(source_uuid) - if source_uuid.version != 4: - raise ValueError(_('Invalid UUID version (%d)') % source_uuid.version) - - # The "time" field of a v4 UUID contains 60 random bits - # (see RFC4122, Section 4.4) - random_bytes = _to_byte_string(source_uuid.time, 60) - # The first 12 bytes (= 60 bits) of base32-encoded output is our data - encoded = base64.b32encode(six.b(random_bytes))[:12] - - if six.PY3: - return encoded.lower().decode('utf-8') - else: - return encoded.lower() - - -def generate_id(): - """Generate a short (12 character), random id.""" - return get_id(uuid.uuid4()) diff --git a/magnum/common/urlfetch.py b/magnum/common/urlfetch.py deleted file mode 100755 index b4b42e5e..00000000 --- a/magnum/common/urlfetch.py +++ /dev/null @@ -1,77 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utility for fetching a resource (e.g. a manifest) from a URL.""" - -from oslo_log import log as logging -import requests -from requests import exceptions -from six.moves import urllib - -from magnum.common import exception -import magnum.conf -from magnum.i18n import _ - -CONF = magnum.conf.CONF -LOG = logging.getLogger(__name__) - - -class URLFetchError(exception.Invalid, IOError): - pass - - -def get(url, allowed_schemes=('http', 'https')): - """Get the data at the specified URL. - - The URL must use the http: or https: schemes. - The file: scheme is also supported if you override - the allowed_schemes argument. - Raise an IOError if getting the data fails. - """ - LOG.info('Fetching data from %s', url) - - components = urllib.parse.urlparse(url) - - if components.scheme not in allowed_schemes: - raise URLFetchError(_('Invalid URL scheme %s') % components.scheme) - - if components.scheme == 'file': # nosec - try: - return urllib.request.urlopen(url).read() - except urllib.error.URLError as uex: - raise URLFetchError(_('Failed to retrieve manifest: %s') % uex) - - try: - resp = requests.get(url, stream=True) - resp.raise_for_status() - - # We cannot use resp.text here because it would download the - # entire file, and a large enough file would bring down the - # engine. The 'Content-Length' header could be faked, so it's - # necessary to download the content in chunks to until - # max_manifest_size is reached. The chunk_size we use needs - # to balance CPU-intensive string concatenation with accuracy - # (eg. it's possible to fetch 1000 bytes greater than - # max_manifest_size with a chunk_size of 1000). 
- reader = resp.iter_content(chunk_size=1000) - result = "" - for chunk in reader: - result += chunk - if len(result) > CONF.max_manifest_size: - raise URLFetchError("Manifest exceeds maximum allowed " - "size (%s bytes)" % - CONF.max_manifest_size) - return result - - except exceptions.RequestException as ex: - raise URLFetchError(_('Failed to retrieve manifest: %s') % ex) diff --git a/magnum/common/utils.py b/magnum/common/utils.py deleted file mode 100755 index 9063eae4..00000000 --- a/magnum/common/utils.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# Copyright (c) 2012 NTT DOCOMO, INC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utilities and helper functions.""" - -import contextlib -import os -import random -import re -import shutil -import tempfile - -from oslo_concurrency import processutils -from oslo_log import log as logging -from oslo_utils import netutils -import six - -from magnum.common import exception -import magnum.conf - -CONF = magnum.conf.CONF -LOG = logging.getLogger(__name__) - -MEMORY_UNITS = { - 'Ki': 2 ** 10, - 'Mi': 2 ** 20, - 'Gi': 2 ** 30, - 'Ti': 2 ** 40, - 'Pi': 2 ** 50, - 'Ei': 2 ** 60, - 'm': 10 ** -3, - 'k': 10 ** 3, - 'M': 10 ** 6, - 'G': 10 ** 9, - 'T': 10 ** 12, - 'P': 10 ** 15, - 'E': 10 ** 18, - '': 1 -} - -DOCKER_MEMORY_UNITS = { - 'b': 1, - 'k': 2 ** 10, - 'm': 2 ** 20, - 'g': 2 ** 30, -} - - -def _get_root_helper(): - return 'sudo magnum-rootwrap %s' % CONF.rootwrap_config - - -def execute(*cmd, **kwargs): - """Convenience wrapper around oslo's execute() method. - - :param cmd: Passed to processutils.execute. - :param use_standard_locale: True | False. Defaults to False. If set to - True, execute command with standard locale - added to environment variables. - :returns: (stdout, stderr) from process execution - :raises: UnknownArgumentError - :raises: ProcessExecutionError - """ - - use_standard_locale = kwargs.pop('use_standard_locale', False) - if use_standard_locale: - env = kwargs.pop('env_variables', os.environ.copy()) - env['LC_ALL'] = 'C' - kwargs['env_variables'] = env - if kwargs.get('run_as_root') and 'root_helper' not in kwargs: - kwargs['root_helper'] = _get_root_helper() - result = processutils.execute(*cmd, **kwargs) - LOG.debug('Execution completed, command line is "%s"', - ' '.join(map(str, cmd))) - LOG.debug('Command stdout is: "%s"', result[0]) - LOG.debug('Command stderr is: "%s"', result[1]) - return result - - -def trycmd(*args, **kwargs): - """Convenience wrapper around oslo's trycmd() method.""" - if kwargs.get('run_as_root') and 'root_helper' not in kwargs: - kwargs['root_helper'] = _get_root_helper() - return processutils.trycmd(*args, **kwargs) - - -def validate_and_normalize_mac(address): - """Validate a MAC address and return normalized form.
- - Checks whether the supplied MAC address is formally correct and - normalizes it to all lower case. - - :param address: MAC address to be validated and normalized. - :returns: Normalized and validated MAC address. - :raises: InvalidMAC If the MAC address is not valid. - - """ - if not netutils.is_valid_mac(address): - raise exception.InvalidMAC(mac=address) - return address.lower() - - -@contextlib.contextmanager -def tempdir(**kwargs): - tempfile.tempdir = CONF.tempdir - tmpdir = tempfile.mkdtemp(**kwargs) - try: - yield tmpdir - finally: - try: - shutil.rmtree(tmpdir) - except OSError as e: - LOG.error('Could not remove tmpdir: %s', e) - - -def rmtree_without_raise(path): - try: - if os.path.isdir(path): - shutil.rmtree(path) - except OSError as e: - LOG.warning("Failed to remove dir %(path)s, error: %(e)s", - {'path': path, 'e': e}) - - -def safe_rstrip(value, chars=None): - """Removes trailing characters from a string if that does not make it empty - - :param value: A string value that will be stripped. - :param chars: Characters to remove. - :return: Stripped value. - - """ - if not isinstance(value, six.string_types): - LOG.warning("Failed to remove trailing character. " - "Returning original object. " - "Supplied object is not a string: %s,", value) - return value - - return value.rstrip(chars) or value - - -def is_name_safe(name): - """Checks whether the name is valid or not. - - :param name: name of the resource. - :returns: True, when name is valid - False, otherwise. - """ - # TODO(madhuri): There should be some validation of name. - # Leaving it now as there is no validation - # while resource creation. - # https://bugs.launchpad.net/magnum/+bug/1430617 - if not name: - return False - return True - - -def get_k8s_quantity(quantity): - """This function is used to get a k8s quantity. - - It supports CPU and memory quantities: - - Kubernetes CPU format must be in the format of: - - <signedNumber>'m' - for example: - 500m = 0.5 core of cpu - - Kubernetes memory format must be in the format of: - - <signedNumber><suffix> - signedNumber = digits|digits.digits|digits.|.digits - suffix = Ki|Mi|Gi|Ti|Pi|Ei|m|k|M|G|T|P|E|'' - or suffix = E|e<signedNumber> - digits = digit | digit<digits> - digit = 0|1|2|3|4|5|6|7|8|9 - - :param quantity: String value of a quantity such as '500m', '1G' - :returns: Quantity number - :raises: exception.UnsupportedK8sQuantityFormat if the quantity string - is an unsupported value - """ - - signed_num_regex = r"(^\d+\.\d+)|(^\d+\.)|(\.\d+)|(^\d+)" - matched_signed_number = re.search(signed_num_regex, quantity) - if matched_signed_number is None: - raise exception.UnsupportedK8sQuantityFormat() - else: - signed_number = matched_signed_number.group(0) - suffix = quantity.replace(signed_number, '', 1) - if suffix == '': - return float(quantity) - if re.search(r"^(Ki|Mi|Gi|Ti|Pi|Ei|m|k|M|G|T|P|E|'')$", suffix): - return float(signed_number) * MEMORY_UNITS[suffix] - elif re.search(r"^[E|e][+|-]?(\d+\.\d+$)|(\d+\.$)|(\.\d+$)|(\d+$)", - suffix): - return float(signed_number) * (10 ** float(suffix[1:])) - else: - raise exception.UnsupportedK8sQuantityFormat() - - -def get_docker_quantity(quantity): - """This function is used to get a swarm memory quantity.
- - Memory format must be in the format of: - - <unsignedNumber><suffix> - suffix = b | k | m | g - - eg: 100m = 104857600 - :raises: exception.UnsupportedDockerQuantityFormat if the quantity string - is an unsupported value - """ - matched_unsigned_number = re.search(r"(^\d+)", quantity) - - if matched_unsigned_number is None: - raise exception.UnsupportedDockerQuantityFormat() - else: - unsigned_number = matched_unsigned_number.group(0) - - suffix = quantity.replace(unsigned_number, '', 1) - if suffix == '': - return int(quantity) - - if re.search(r"^(b|k|m|g)$", suffix): - return int(unsigned_number) * DOCKER_MEMORY_UNITS[suffix] - - raise exception.UnsupportedDockerQuantityFormat() - - -def generate_password(length, symbolgroups=None): - """Generate a random password from the supplied symbol groups. - - At least one symbol from each group will be included. Unpredictable - results if length is less than the number of symbol groups. - - Believed to be reasonably secure (with a reasonable password length!) - - """ - - if symbolgroups is None: - symbolgroups = CONF.password_symbols - - r = random.SystemRandom() - - # NOTE(jerdfelt): Some password policies require at least one character - # from each group of symbols, so start off with one random character - # from each symbol group - password = [r.choice(s) for s in symbolgroups] - # If length < len(symbolgroups), the leading characters will only - # be from the first length groups. Try our best to not be predictable - # by shuffling and then truncating. - r.shuffle(password) - password = password[:length] - length -= len(password) - - # then fill with random characters from all symbol groups - symbols = ''.join(symbolgroups) - password.extend([r.choice(symbols) for _i in range(length)]) - - # finally shuffle to ensure first x characters aren't from a - # predictable group - r.shuffle(password) - - return ''.join(password) diff --git a/magnum/common/x509/__init__.py b/magnum/common/x509/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/common/x509/extensions.py b/magnum/common/x509/extensions.py deleted file mode 100644 index 031a4910..00000000 --- a/magnum/common/x509/extensions.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
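As a quick illustration of the generate_password helper above, a hedged usage sketch; the symbol groups here are made-up stand-ins for the CONF.password_symbols default:

    groups = ('abcdefgh', 'ABCDEFGH', '23456789')
    password = generate_password(18, symbolgroups=groups)
    # the helper guarantees at least one character per group (length permitting)
    assert all(any(c in g for c in password) for g in groups)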
- -import enum - - -class Extensions(enum.Enum): - __order__ = ('AUTHORITY_KEY_IDENTIFIER SUBJECT_KEY_IDENTIFIER ' - 'AUTHORITY_INFORMATION_ACCESS BASIC_CONSTRAINTS ' - 'CRL_DISTRIBUTION_POINTS CERTIFICATE_POLICIES ' - 'EXTENDED_KEY_USAGE OCSP_NO_CHECK INHIBIT_ANY_POLICY ' - 'KEY_USAGE NAME_CONSTRAINTS SUBJECT_ALTERNATIVE_NAME ' - 'ISSUER_ALTERNATIVE_NAME') - - AUTHORITY_KEY_IDENTIFIER = "authorityKeyIdentifier" - SUBJECT_KEY_IDENTIFIER = "subjectKeyIdentifier" - AUTHORITY_INFORMATION_ACCESS = "authorityInfoAccess" - BASIC_CONSTRAINTS = "basicConstraints" - CRL_DISTRIBUTION_POINTS = "cRLDistributionPoints" - CERTIFICATE_POLICIES = "certificatePolicies" - EXTENDED_KEY_USAGE = "extendedKeyUsage" - OCSP_NO_CHECK = "OCSPNoCheck" - INHIBIT_ANY_POLICY = "inhibitAnyPolicy" - KEY_USAGE = "keyUsage" - NAME_CONSTRAINTS = "nameConstraints" - SUBJECT_ALTERNATIVE_NAME = "subjectAltName" - ISSUER_ALTERNATIVE_NAME = "issuerAltName" - - -class KeyUsages(enum.Enum): - __order__ = ('DIGITAL_SIGNATURE CONTENT_COMMITMENT KEY_ENCIPHERMENT ' - 'DATA_ENCIPHERMENT KEY_AGREEMENT KEY_CERT_SIGN ' - 'CRL_SIGN ENCIPHER_ONLY DECIPHER_ONLY') - - DIGITAL_SIGNATURE = ("Digital Signature", "digital_signature") - CONTENT_COMMITMENT = ("Non Repudiation", "content_commitment") - KEY_ENCIPHERMENT = ("Key Encipherment", "key_encipherment") - DATA_ENCIPHERMENT = ("Data Encipherment", "data_encipherment") - KEY_AGREEMENT = ("Key Agreement", "key_agreement") - KEY_CERT_SIGN = ("Certificate Sign", "key_cert_sign") - CRL_SIGN = ("CRL Sign", "crl_sign") - ENCIPHER_ONLY = ("Encipher Only", "encipher_only") - DECIPHER_ONLY = ("Decipher Only", "decipher_only") diff --git a/magnum/common/x509/operations.py b/magnum/common/x509/operations.py deleted file mode 100644 index 30860a9c..00000000 --- a/magnum/common/x509/operations.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
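The x509 operations module below builds a self-signed CA and CA-signed client certificates on top of the cryptography library. A sketch of the intended call sequence, where the subject names and passwords are illustrative; each result is a dict of PEM-encoded 'private_key' and 'certificate':

    ca = generate_ca_certificate(u'my-cluster', encryption_password=b'ca-pass')
    client = generate_client_certificate(
        u'my-cluster', u'Magnum-Conductor', ca['private_key'],
        encryption_password=b'client-pass', ca_key_password=b'ca-pass')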
- -import datetime -import six -import uuid - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.asymmetric import rsa -from cryptography.hazmat.primitives import hashes -from cryptography.hazmat.primitives import serialization -from cryptography import x509 -from oslo_log import log as logging - -from magnum.common import exception -from magnum.common.x509 import validator -import magnum.conf - -LOG = logging.getLogger(__name__) - -CONF = magnum.conf.CONF - - -def generate_ca_certificate(subject_name, encryption_password=None): - """Generate CA Certificate - - :param subject_name: subject name of CA - :param encryption_password: encryption password for private key - :returns: generated private key and certificate pair - """ - return _generate_self_signed_certificate( - subject_name, - _build_ca_extentions(), - encryption_password=encryption_password - ) - - -def generate_client_certificate(issuer_name, subject_name, ca_key, - encryption_password=None, - ca_key_password=None): - """Generate Client Certificate - - :param issuer_name: issuer name - :param subject_name: subject name of client - :param ca_key: private key of CA - :param encryption_password: encryption password for private key - :param ca_key_password: private key password for given ca key - :returns: generated private key and certificate pair - """ - return _generate_certificate(issuer_name, subject_name, - _build_client_extentions(), ca_key=ca_key, - encryption_password=encryption_password, - ca_key_password=ca_key_password) - - -def _build_client_extentions(): - # Digital Signature and Key Encipherment are enabled - key_usage = x509.KeyUsage(True, False, True, False, False, False, False, - False, False) - key_usage = x509.Extension(key_usage.oid, True, key_usage) - extended_key_usage = x509.ExtendedKeyUsage([x509.OID_CLIENT_AUTH]) - extended_key_usage = x509.Extension(extended_key_usage.oid, False, - extended_key_usage) - basic_constraints = x509.BasicConstraints(ca=False, path_length=None) - basic_constraints = x509.Extension(basic_constraints.oid, True, - basic_constraints) - - return [key_usage, extended_key_usage, basic_constraints] - - -def _build_ca_extentions(): - # Certificate Sign is enabled - key_usage = x509.KeyUsage(False, False, False, False, False, True, False, - False, False) - key_usage = x509.Extension(key_usage.oid, True, key_usage) - basic_constraints = x509.BasicConstraints(ca=True, path_length=0) - basic_constraints = x509.Extension(basic_constraints.oid, True, - basic_constraints) - - return [basic_constraints, key_usage] - - -def _generate_self_signed_certificate(subject_name, extensions, - encryption_password=None): - return _generate_certificate(subject_name, subject_name, extensions, - encryption_password=encryption_password) - - -def _generate_certificate(issuer_name, subject_name, extensions, ca_key=None, - encryption_password=None, ca_key_password=None): - - if not isinstance(subject_name, six.text_type): - subject_name = six.text_type(subject_name.decode('utf-8')) - - private_key = rsa.generate_private_key( - public_exponent=65537, - key_size=CONF.x509.rsa_key_size, - backend=default_backend() - ) - - # subject name is set as common name - csr = x509.CertificateSigningRequestBuilder() - csr = csr.subject_name(x509.Name([ - x509.NameAttribute(x509.OID_COMMON_NAME, subject_name), - ])) - - for extention in extensions: - csr = csr.add_extension(extention.value, critical=extention.critical) - - # if ca_key is not provided, it means self signed - if not 
ca_key: - ca_key = private_key - ca_key_password = encryption_password - - csr = csr.sign(private_key, hashes.SHA256(), default_backend()) - - if encryption_password: - encryption_algorithm = serialization.BestAvailableEncryption( - encryption_password) - else: - encryption_algorithm = serialization.NoEncryption() - - private_key = private_key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.PKCS8, - encryption_algorithm=encryption_algorithm - ) - - keypairs = { - 'private_key': private_key, - 'certificate': sign( - csr, - issuer_name, - ca_key, - ca_key_password=ca_key_password, - skip_validation=True), - } - return keypairs - - -def _load_pem_private_key(ca_key, ca_key_password=None): - if not isinstance(ca_key, rsa.RSAPrivateKey): - if isinstance(ca_key, six.text_type): - ca_key = six.b(str(ca_key)) - if isinstance(ca_key_password, six.text_type): - ca_key_password = six.b(str(ca_key_password)) - - ca_key = serialization.load_pem_private_key( - ca_key, - password=ca_key_password, - backend=default_backend() - ) - - return ca_key - - -def sign(csr, issuer_name, ca_key, ca_key_password=None, - skip_validation=False): - """Sign a given csr - - :param csr: certificate signing request object or pem encoded csr - :param issuer_name: issuer name - :param ca_key: private key of CA - :param ca_key_password: private key password for given ca key - :param skip_validation: skip csr validation if true - :returns: generated certificate - """ - - ca_key = _load_pem_private_key(ca_key, ca_key_password) - - if not isinstance(issuer_name, six.text_type): - issuer_name = six.text_type(issuer_name.decode('utf-8')) - - if isinstance(csr, six.text_type): - csr = six.b(str(csr)) - if not isinstance(csr, x509.CertificateSigningRequest): - try: - csr = x509.load_pem_x509_csr(csr, backend=default_backend()) - except ValueError: - LOG.exception("Received invalid csr {0}.".format(csr)) - raise exception.InvalidCsr(csr=csr) - - term_of_validity = CONF.x509.term_of_validity - one_day = datetime.timedelta(1, 0, 0) - expire_after = datetime.timedelta(term_of_validity, 0, 0) - - builder = x509.CertificateBuilder() - builder = builder.subject_name(csr.subject) - # issuer_name is set as common name - builder = builder.issuer_name(x509.Name([ - x509.NameAttribute(x509.OID_COMMON_NAME, issuer_name), - ])) - builder = builder.not_valid_before(datetime.datetime.today() - one_day) - builder = builder.not_valid_after(datetime.datetime.today() + expire_after) - builder = builder.serial_number(int(uuid.uuid4())) - builder = builder.public_key(csr.public_key()) - - if skip_validation: - extensions = csr.extensions - else: - extensions = validator.filter_extensions(csr.extensions) - - for extention in extensions: - builder = builder.add_extension(extention.value, - critical=extention.critical) - - certificate = builder.sign( - private_key=ca_key, algorithm=hashes.SHA256(), - backend=default_backend() - ).public_bytes(serialization.Encoding.PEM) - - return certificate - - -def decrypt_key(encrypted_key, password): - private_key = _load_pem_private_key(encrypted_key, password) - - decrypted_pem = private_key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.PKCS8, - encryption_algorithm=serialization.NoEncryption() - ) - return decrypted_pem diff --git a/magnum/common/x509/validator.py b/magnum/common/x509/validator.py deleted file mode 100644 index a8ad3756..00000000 --- a/magnum/common/x509/validator.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 
2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cryptography import x509 - -from magnum.common import exception -from magnum.common.x509 import extensions -import magnum.conf - -_CA_KEY_USAGES = [ - extensions.KeyUsages.KEY_CERT_SIGN.value[0], - extensions.KeyUsages.CRL_SIGN.value[0] -] - -CONF = magnum.conf.CONF - - -def filter_extensions(extensions): - filtered_extensions = [] - allowed_key_usage = set(CONF.x509.allowed_key_usage) - if not CONF.x509.allow_ca: - allowed_key_usage = _remove_ca_key_usage(allowed_key_usage) - - for ext in filter_allowed_extensions(extensions, - CONF.x509.allowed_extensions): - if ext.oid == x509.OID_KEY_USAGE: - ext = _merge_key_usage(ext, allowed_key_usage) - elif ext.oid == x509.OID_BASIC_CONSTRAINTS: - if not CONF.x509.allow_ca: - ext = _disallow_ca_in_basic_constraints(ext) - - filtered_extensions.append(ext) - - return filtered_extensions - - -def filter_allowed_extensions(extensions, allowed_extensions=None): - """Ensure only accepted extensions are used.""" - allowed_extensions = allowed_extensions or [] - - for ext in extensions: - ext_name = x509.oid._OID_NAMES.get(ext.oid, None) - if ext_name in allowed_extensions: - yield ext - else: - if ext.critical: - raise exception.CertificateValidationError(extension=ext) - - -def _merge_key_usage(key_usage, allowed_key_usage): - critical = key_usage.critical - key_usage_value = key_usage.value - - usages = [] - for usage in extensions.KeyUsages: - k, v = usage.value - try: - value = getattr(key_usage_value, v) - except ValueError: - # ValueError is raised when encipher_only/decipher_only is - # retrieved but key_agreement is False - value = False - if value: - if k not in allowed_key_usage: - if critical: - raise exception.CertificateValidationError( - extension=key_usage) - else: - value = False - usages.append(value) - - rtn = x509.KeyUsage(*usages) - return x509.Extension(rtn.oid, critical, rtn) - - -def _remove_ca_key_usage(allowed_key_usage): - for usage in _CA_KEY_USAGES: - try: - allowed_key_usage.remove(usage) - except KeyError: - pass - return allowed_key_usage - - -def _disallow_ca_in_basic_constraints(basic_constraints): - if basic_constraints.value.ca: - if basic_constraints.critical: - raise exception.CertificateValidationError( - extension=basic_constraints) - - bc = x509.BasicConstraints(False, None) - return x509.Extension(bc.oid, False, bc) - - return basic_constraints diff --git a/magnum/conductor/__init__.py b/magnum/conductor/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/conductor/api.py b/magnum/conductor/api.py deleted file mode 100644 index 9295f31e..00000000 --- a/magnum/conductor/api.py +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""API for interfacing with Magnum Backend.""" - -from magnum.common import profiler -from magnum.common import rpc_service -import magnum.conf - -CONF = magnum.conf.CONF - -# The Backend API class serves as an AMQP client for communicating -# on a topic exchange specific to the conductors. This allows the ReST -# API to trigger operations on the conductors. - - -@profiler.trace_cls("rpc") -class API(rpc_service.API): - def __init__(self, transport=None, context=None, topic=None): - super(API, self).__init__(transport, context, - topic=CONF.conductor.topic) - - # Cluster Operations - - def cluster_create(self, cluster, create_timeout): - return self._call('cluster_create', cluster=cluster, - create_timeout=create_timeout) - - def cluster_create_async(self, cluster, create_timeout): - self._cast('cluster_create', cluster=cluster, - create_timeout=create_timeout) - - def cluster_delete(self, uuid): - return self._call('cluster_delete', uuid=uuid) - - def cluster_delete_async(self, uuid): - self._cast('cluster_delete', uuid=uuid) - - def cluster_update(self, cluster): - return self._call('cluster_update', cluster=cluster) - - def cluster_update_async(self, cluster, rollback=False): - self._cast('cluster_update', cluster=cluster, rollback=rollback) - - # CA operations - - def sign_certificate(self, cluster, certificate): - return self._call('sign_certificate', cluster=cluster, - certificate=certificate) - - def get_ca_certificate(self, cluster): - return self._call('get_ca_certificate', cluster=cluster) - - def rotate_ca_certificate(self, cluster): - return self._call('rotate_ca_certificate', cluster=cluster) - - # Versioned Objects indirection API - - def object_class_action(self, context, objname, objmethod, objver, - args, kwargs): - "Indirection API callback" - return self._client.call(context, 'object_class_action', - objname=objname, objmethod=objmethod, - objver=objver, args=args, kwargs=kwargs) - - def object_action(self, context, objinst, objmethod, args, kwargs): - "Indirection API callback" - return self._client.call(context, 'object_action', objinst=objinst, - objmethod=objmethod, args=args, kwargs=kwargs) - - def object_backport(self, context, objinst, target_version): - "Indirection API callback" - return self._client.call(context, 'object_backport', objinst=objinst, - target_version=target_version) - - -@profiler.trace_cls("rpc") -class ListenerAPI(rpc_service.API): - def __init__(self, context=None, topic=None, server=None, timeout=None): - super(ListenerAPI, self).__init__(context=context, topic=topic, - server=server, timeout=timeout) - - def ping_conductor(self): - return self._call('ping_conductor') diff --git a/magnum/conductor/handlers/__init__.py b/magnum/conductor/handlers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/conductor/handlers/ca_conductor.py b/magnum/conductor/handlers/ca_conductor.py deleted file mode 100644 index eee89c9d..00000000 --- a/magnum/conductor/handlers/ca_conductor.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from oslo_log import log as logging - -from magnum.common import profiler -from magnum.conductor.handlers.common import cert_manager -from magnum.drivers.common import driver -from magnum import objects -LOG = logging.getLogger(__name__) - - -@profiler.trace_cls("rpc") -class Handler(object): - """Magnum CA RPC handler. - - These are the backend operations. They are executed by the backend service. - API calls via AMQP (within the ReST API) trigger the handlers to be called. - - """ - - def __init__(self): - super(Handler, self).__init__() - - def sign_certificate(self, context, cluster, certificate): - LOG.debug("Creating self signed x509 certificate") - signed_cert = cert_manager.sign_node_certificate(cluster, - certificate.csr, - context=context) - certificate.pem = signed_cert - return certificate - - def get_ca_certificate(self, context, cluster): - ca_cert = cert_manager.get_cluster_ca_certificate(cluster, - context=context) - certificate = objects.Certificate.from_object_cluster(cluster) - certificate.pem = ca_cert.get_certificate() - return certificate - - def rotate_ca_certificate(self, context, cluster): - cluster_driver = driver.Driver.get_driver_for_cluster(context, - cluster) - cluster_driver.rotate_ca_certificate(context, cluster) diff --git a/magnum/conductor/handlers/cluster_conductor.py b/magnum/conductor/handlers/cluster_conductor.py deleted file mode 100755 index 344adae7..00000000 --- a/magnum/conductor/handlers/cluster_conductor.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
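The CA Handler above is reached over RPC through the conductor API (magnum/conductor/api.py, deleted earlier in this diff). A sketch of the calling side; context, cluster, and certificate are placeholder objects:

    from magnum.conductor import api as conductor_api

    rpcapi = conductor_api.API(context=context)
    # certificate.csr carries the PEM CSR; the handler fills in certificate.pem
    signed = rpcapi.sign_certificate(cluster, certificate)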
- -from heatclient import exc -from oslo_log import log as logging -from pycadf import cadftaxonomy as taxonomy -import six - -from magnum.common import clients -from magnum.common import exception -from magnum.common import profiler -from magnum.conductor.handlers.common import cert_manager -from magnum.conductor.handlers.common import trust_manager -from magnum.conductor import scale_manager -from magnum.conductor import utils as conductor_utils -import magnum.conf -from magnum.drivers.common import driver -from magnum.i18n import _ -from magnum import objects -from magnum.objects import fields - -CONF = magnum.conf.CONF - -LOG = logging.getLogger(__name__) - - -@profiler.trace_cls("rpc") -class Handler(object): - - def __init__(self): - super(Handler, self).__init__() - - # Cluster Operations - - def cluster_create(self, context, cluster, create_timeout): - LOG.debug('cluster_heat cluster_create') - - osc = clients.OpenStackClients(context) - - cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS - cluster.status_reason = None - cluster.create() - - try: - # Create trustee/trust and set them to cluster - trust_manager.create_trustee_and_trust(osc, cluster) - # Generate certificate and set the cert reference to cluster - cert_manager.generate_certificates_to_cluster(cluster, - context=context) - conductor_utils.notify_about_cluster_operation( - context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_PENDING) - # Get driver - cluster_driver = driver.Driver.get_driver_for_cluster(context, - cluster) - # Create cluster - cluster_driver.create_cluster(context, cluster, create_timeout) - cluster.save() - - except Exception as e: - cluster.status = fields.ClusterStatus.CREATE_FAILED - cluster.status_reason = six.text_type(e) - cluster.save() - conductor_utils.notify_about_cluster_operation( - context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_FAILURE) - - if isinstance(e, exc.HTTPBadRequest): - e = exception.InvalidParameterValue(message=six.text_type(e)) - - raise e - raise - - return cluster - - def cluster_update(self, context, cluster, rollback=False): - LOG.debug('cluster_heat cluster_update') - - osc = clients.OpenStackClients(context) - allow_update_status = ( - fields.ClusterStatus.CREATE_COMPLETE, - fields.ClusterStatus.UPDATE_COMPLETE, - fields.ClusterStatus.RESUME_COMPLETE, - fields.ClusterStatus.RESTORE_COMPLETE, - fields.ClusterStatus.ROLLBACK_COMPLETE, - fields.ClusterStatus.SNAPSHOT_COMPLETE, - fields.ClusterStatus.CHECK_COMPLETE, - fields.ClusterStatus.ADOPT_COMPLETE - ) - if cluster.status not in allow_update_status: - conductor_utils.notify_about_cluster_operation( - context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE) - operation = _('Updating a cluster when status is ' - '"%s"') % cluster.status - raise exception.NotSupported(operation=operation) - - delta = cluster.obj_what_changed() - if not delta: - return cluster - - manager = scale_manager.get_scale_manager(context, osc, cluster) - - # Get driver - ct = conductor_utils.retrieve_cluster_template(context, cluster) - cluster_driver = driver.Driver.get_driver(ct.server_type, - ct.cluster_distro, - ct.coe) - # Update cluster - try: - conductor_utils.notify_about_cluster_operation( - context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING) - cluster_driver.update_cluster(context, cluster, manager, rollback) - cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS - cluster.status_reason = None - except Exception as e: - cluster.status = fields.ClusterStatus.UPDATE_FAILED - cluster.status_reason = six.text_type(e) - 
cluster.save() - conductor_utils.notify_about_cluster_operation( - context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE) - if isinstance(e, exc.HTTPBadRequest): - e = exception.InvalidParameterValue(message=six.text_type(e)) - raise e - raise - - cluster.save() - return cluster - - def cluster_delete(self, context, uuid): - LOG.debug('cluster_conductor cluster_delete') - osc = clients.OpenStackClients(context) - cluster = objects.Cluster.get_by_uuid(context, uuid) - ct = conductor_utils.retrieve_cluster_template(context, cluster) - cluster_driver = driver.Driver.get_driver(ct.server_type, - ct.cluster_distro, - ct.coe) - try: - conductor_utils.notify_about_cluster_operation( - context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING) - cluster_driver.delete_cluster(context, cluster) - cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS - cluster.status_reason = None - except exc.HTTPNotFound: - LOG.info('The cluster %s was not found during cluster' - ' deletion.', cluster.id) - try: - trust_manager.delete_trustee_and_trust(osc, context, cluster) - cert_manager.delete_certificates_from_cluster(cluster, - context=context) - cluster.destroy() - except exception.ClusterNotFound: - LOG.info('The cluster %s has been deleted by others.', - uuid) - conductor_utils.notify_about_cluster_operation( - context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS) - return None - except exc.HTTPConflict: - conductor_utils.notify_about_cluster_operation( - context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE) - raise exception.OperationInProgress(cluster_name=cluster.name) - except Exception as unexp: - conductor_utils.notify_about_cluster_operation( - context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE) - cluster.status = fields.ClusterStatus.DELETE_FAILED - cluster.status_reason = six.text_type(unexp) - cluster.save() - raise - - cluster.save() - return None diff --git a/magnum/conductor/handlers/common/__init__.py b/magnum/conductor/handlers/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/conductor/handlers/common/cert_manager.py b/magnum/conductor/handlers/common/cert_manager.py deleted file mode 100755 index e87df4ef..00000000 --- a/magnum/conductor/handlers/common/cert_manager.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
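The cert_manager helpers below tie the x509 operations to the pluggable certificate storage backend. A sketch of the lifecycle, assuming a cluster object with name/uuid and the *_ref attributes used here:

    # create CA + conductor client certs and record their refs on the cluster
    generate_certificates_to_cluster(cluster, context=ctx)
    # later, fetch the CA cert (e.g. to hand its PEM to cluster nodes)
    ca_cert = get_cluster_ca_certificate(cluster, context=ctx)
    # on cluster deletion, drop both stored certs
    delete_certificates_from_cluster(cluster, context=ctx)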
- -import tempfile - -from oslo_log import log as logging -import six - -from magnum.common import cert_manager -from magnum.common import exception -from magnum.common import short_id -from magnum.common.x509 import operations as x509 - -CONDUCTOR_CLIENT_NAME = six.u('Magnum-Conductor') - -LOG = logging.getLogger(__name__) - - -def _generate_ca_cert(issuer_name, context=None): - """Generate and store ca_cert - - :param issuer_name: CA subject name - :returns: CA cert reference, CA cert, and CA private key password - """ - ca_password = short_id.generate_id() - ca_cert = x509.generate_ca_certificate(issuer_name, - encryption_password=ca_password) - ca_cert_ref = cert_manager.get_backend().CertManager.store_cert( - certificate=ca_cert['certificate'], - private_key=ca_cert['private_key'], - private_key_passphrase=ca_password, - name=issuer_name, - context=context, - ) - LOG.debug('CA cert is created: %s', ca_cert_ref) - return ca_cert_ref, ca_cert, ca_password - - -def _generate_client_cert(issuer_name, ca_cert, ca_password, context=None): - """Generate and store magnum_client_cert - - :param issuer_name: CA subject name - :param ca_cert: CA certificate - :param ca_password: CA private key password - :returns: Magnum client cert reference - """ - client_password = short_id.generate_id() - client_cert = x509.generate_client_certificate( - issuer_name, - CONDUCTOR_CLIENT_NAME, - ca_cert['private_key'], - encryption_password=client_password, - ca_key_password=ca_password, - ) - magnum_cert_ref = cert_manager.get_backend().CertManager.store_cert( - certificate=client_cert['certificate'], - private_key=client_cert['private_key'], - private_key_passphrase=client_password, - name=CONDUCTOR_CLIENT_NAME, - context=context - ) - LOG.debug('Magnum client cert is created: %s', magnum_cert_ref) - return magnum_cert_ref - - -def _get_issuer_name(cluster): - issuer_name = cluster.name - # When a user creates a Cluster without a name, cluster.name is None, - # so cluster.uuid is used as the issuer name instead.
- if issuer_name is None: - issuer_name = cluster.uuid - return issuer_name - - -def generate_certificates_to_cluster(cluster, context=None): - """Generate ca_cert and magnum client cert and set them on the cluster - - :param cluster: The cluster to set the CA cert and magnum client cert on - :returns: CA cert uuid and magnum client cert uuid - """ - try: - issuer_name = _get_issuer_name(cluster) - - LOG.debug('Start to generate certificates: %s', issuer_name) - - ca_cert_ref, ca_cert, ca_password = _generate_ca_cert(issuer_name, - context=context) - magnum_cert_ref = _generate_client_cert(issuer_name, - ca_cert, - ca_password, - context=context) - - cluster.ca_cert_ref = ca_cert_ref - cluster.magnum_cert_ref = magnum_cert_ref - except Exception: - LOG.exception('Failed to generate certificates for Cluster: %s', - cluster.uuid) - raise exception.CertificatesToClusterFailed(cluster_uuid=cluster.uuid) - - -def get_cluster_ca_certificate(cluster, context=None): - ca_cert = cert_manager.get_backend().CertManager.get_cert( - cluster.ca_cert_ref, - resource_ref=cluster.uuid, - context=context - ) - - return ca_cert - - -def get_cluster_magnum_cert(cluster, context=None): - magnum_cert = cert_manager.get_backend().CertManager.get_cert( - cluster.magnum_cert_ref, - resource_ref=cluster.uuid, - context=context - ) - - return magnum_cert - - -def create_client_files(cluster, context=None): - ca_cert = get_cluster_ca_certificate(cluster, context) - magnum_cert = get_cluster_magnum_cert(cluster, context) - - ca_cert_file = tempfile.NamedTemporaryFile() - ca_cert_file.write(ca_cert.get_certificate()) - ca_cert_file.flush() - - magnum_key_file = tempfile.NamedTemporaryFile() - magnum_key_file.write(magnum_cert.get_decrypted_private_key()) - magnum_key_file.flush() - - magnum_cert_file = tempfile.NamedTemporaryFile() - magnum_cert_file.write(magnum_cert.get_certificate()) - magnum_cert_file.flush() - - return ca_cert_file, magnum_key_file, magnum_cert_file - - -def sign_node_certificate(cluster, csr, context=None): - ca_cert = cert_manager.get_backend().CertManager.get_cert( - cluster.ca_cert_ref, - resource_ref=cluster.uuid, - context=context - ) - - node_cert = x509.sign(csr, - _get_issuer_name(cluster), - ca_cert.get_private_key(), - ca_cert.get_private_key_passphrase()) - return node_cert - - -def delete_certificates_from_cluster(cluster, context=None): - """Delete the CA cert and magnum client cert from the cluster - - :param cluster: The cluster which has the certs - """ - for cert_ref in ['ca_cert_ref', 'magnum_cert_ref']: - try: - cert_ref = getattr(cluster, cert_ref, None) - if cert_ref: - cert_manager.get_backend().CertManager.delete_cert( - cert_ref, resource_ref=cluster.uuid, context=context) - except Exception: - LOG.warning("Failed to delete certs for Cluster %s", - cluster.uuid) diff --git a/magnum/conductor/handlers/common/trust_manager.py b/magnum/conductor/handlers/common/trust_manager.py deleted file mode 100755 index d49ff0a2..00000000 --- a/magnum/conductor/handlers/common/trust_manager.py +++ /dev/null @@ -1,63 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from magnum.common import exception -from magnum.common import utils - -LOG = logging.getLogger(__name__) - - -def create_trustee_and_trust(osc, cluster): - try: - password = utils.generate_password(length=18) - - trustee = osc.keystone().create_trustee( - "%s_%s" % (cluster.uuid, cluster.project_id), - password, - ) - - cluster.trustee_username = trustee.name - cluster.trustee_user_id = trustee.id - cluster.trustee_password = password - - trust = osc.keystone().create_trust( - cluster.trustee_user_id) - cluster.trust_id = trust.id - - except Exception: - LOG.exception( - 'Failed to create trustee and trust for Cluster: %s', - cluster.uuid) - raise exception.TrusteeOrTrustToClusterFailed( - cluster_uuid=cluster.uuid) - - -def delete_trustee_and_trust(osc, context, cluster): - try: - kst = osc.keystone() - - # The cluster which is upgraded from Liberty doesn't have trust_id - if cluster.trust_id: - kst.delete_trust(context, cluster) - except Exception: - # Exceptions are already logged by keystone().delete_trust - pass - try: - # The cluster which is upgraded from Liberty doesn't have - # trustee_user_id - if cluster.trustee_user_id: - osc.keystone().delete_trustee(cluster.trustee_user_id) - except Exception: - # Exceptions are already logged by keystone().delete_trustee - pass diff --git a/magnum/conductor/handlers/conductor_listener.py b/magnum/conductor/handlers/conductor_listener.py deleted file mode 100644 index fd7710ac..00000000 --- a/magnum/conductor/handlers/conductor_listener.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from magnum.common import profiler - - -@profiler.trace_cls("rpc") -class Handler(object): - '''Listen on an AMQP queue named for the conductor. - - Allows individual conductors to communicate with each other for - multi-conductor support. - ''' - def ping_conductor(self, context): - '''Respond to conductor. - - Respond affirmatively to confirm that the conductor performing the - action is still alive. - ''' - return True diff --git a/magnum/conductor/handlers/indirection_api.py b/magnum/conductor/handlers/indirection_api.py deleted file mode 100644 index 1671c984..00000000 --- a/magnum/conductor/handlers/indirection_api.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
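# A minimal sketch of exercising create_trustee_and_trust() from
# trust_manager.py above with a mocked OpenStackClients; the mock wiring
# and attribute values are illustrative assumptions, not code from the
# original tree (it assumes the magnum modules above are importable).
from unittest import mock


def demo_create_trustee_and_trust():
    osc = mock.Mock()
    trustee = mock.Mock(id='trustee-id')
    trustee.name = 'uuid_project'   # Mock(name=...) is reserved, set it here
    osc.keystone.return_value.create_trustee.return_value = trustee
    osc.keystone.return_value.create_trust.return_value = mock.Mock(
        id='trust-id')

    cluster = mock.Mock(uuid='uuid', project_id='project')
    create_trustee_and_trust(osc, cluster)

    # The helper stores the trustee credentials and the trust on the cluster.
    assert cluster.trustee_user_id == 'trustee-id'
    assert cluster.trust_id == 'trust-id'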
- -import oslo_messaging as messaging - -from magnum.common import profiler -from magnum.objects import base - - -@profiler.trace_cls("rpc") -class Handler(object): - "Indirection API callbacks" - - def _object_dispatch(self, target, method, context, args, kwargs): - """Dispatch a call to an object method. - - This ensures that object methods get called and any exception - that is raised gets wrapped in an ExpectedException for forwarding - back to the caller (without spamming the conductor logs). - """ - try: - # NOTE(danms): Keep the getattr inside the try block since - # a missing method is really a client problem - return getattr(target, method)(context, *args, **kwargs) - except Exception: - raise messaging.ExpectedException() - - def object_class_action(self, context, objname, objmethod, - objver, args, kwargs): - """Perform a classmethod action on an object.""" - objclass = base.MagnumObject.obj_class_from_name(objname, objver) - result = self._object_dispatch(objclass, objmethod, context, - args, kwargs) - # NOTE(danms): The RPC layer will convert to primitives for us, - # but in this case, we need to honor the version the client is - # asking for, so we do it before returning here. - return (result.obj_to_primitive(target_version=objver) - if isinstance(result, base.MagnumObject) else result) - - def object_action(self, context, objinst, objmethod, args, kwargs): - """Perform an action on an object.""" - old_objinst = objinst.obj_clone() - result = self._object_dispatch(objinst, objmethod, context, - args, kwargs) - updates = dict() - # NOTE(danms): Diff the object with the one passed to us and - # generate a list of changes to forward back - for name, field in objinst.fields.items(): - if not objinst.obj_attr_is_set(name): - # Avoid demand-loading anything - continue - if (not old_objinst.obj_attr_is_set(name) or - getattr(old_objinst, name) != getattr(objinst, name)): - updates[name] = field.to_primitive(objinst, name, - getattr(objinst, name)) - updates['obj_what_changed'] = objinst.obj_what_changed() - return updates, result - - def object_backport(self, context, objinst, target_version): - return objinst.obj_to_primitive(target_version=target_version) diff --git a/magnum/conductor/k8s_api.py b/magnum/conductor/k8s_api.py deleted file mode 100755 index f78ee578..00000000 --- a/magnum/conductor/k8s_api.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import tempfile - -from kubernetes import client as k8s_config -from kubernetes.client import api_client -from kubernetes.client.apis import core_v1_api -from oslo_log import log as logging - -from magnum.conductor.handlers.common.cert_manager import create_client_files - -LOG = logging.getLogger(__name__) - - -class K8sAPI(core_v1_api.CoreV1Api): - - def _create_temp_file_with_content(self, content): - """Creates temp file and write content to the file. 
- - :param content: file content - :returns: temp file - """ - try: - tmp = tempfile.NamedTemporaryFile(delete=True) - tmp.write(content) - tmp.flush() - except Exception as err: - LOG.error("Error while creating temp file: %s", err) - raise - return tmp - - def __init__(self, context, cluster): - self.ca_file = None - self.cert_file = None - self.key_file = None - - if cluster.magnum_cert_ref: - (self.ca_file, self.key_file, - self.cert_file) = create_client_files(cluster, context) - - config = k8s_config.ConfigurationObject() - config.host = cluster.api_address - config.ssl_ca_cert = self.ca_file.name - config.cert_file = self.cert_file.name - config.key_file = self.key_file.name - - # build a connection with Kubernetes master - client = api_client.ApiClient(config=config) - - super(K8sAPI, self).__init__(client) - - def __del__(self): - if self.ca_file: - self.ca_file.close() - if self.cert_file: - self.cert_file.close() - if self.key_file: - self.key_file.close() - - -def create_k8s_api(context, cluster): - """Create a kubernetes API client - - Creates connection with Kubernetes master and creates ApivApi instance - to call Kubernetes APIs. - - :param context: The security context - :param cluster: Cluster object - """ - return K8sAPI(context, cluster) diff --git a/magnum/conductor/monitors.py b/magnum/conductor/monitors.py deleted file mode 100644 index 53602048..00000000 --- a/magnum/conductor/monitors.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -from oslo_log import log -import six - -from magnum.common import profiler -import magnum.conf -from magnum.drivers.common.driver import Driver - - -LOG = log.getLogger(__name__) - -CONF = magnum.conf.CONF - - -@profiler.trace_cls("rpc") -@six.add_metaclass(abc.ABCMeta) -class MonitorBase(object): - - def __init__(self, context, cluster): - self.context = context - self.cluster = cluster - - @abc.abstractproperty - def metrics_spec(self): - """Metric specification.""" - - @abc.abstractmethod - def pull_data(self): - """Pull data for monitoring.""" - - def get_metric_names(self): - return self.metrics_spec.keys() - - def get_metric_unit(self, metric_name): - return self.metrics_spec[metric_name]['unit'] - - def compute_metric_value(self, metric_name): - func_name = self.metrics_spec[metric_name]['func'] - func = getattr(self, func_name) - return func() - - -def create_monitor(context, cluster): - cluster_driver = Driver.get_driver_for_cluster(context, cluster) - monitor = cluster_driver.get_monitor(context, cluster) - if monitor: - return monitor - - LOG.debug("Cannot create monitor with cluster type '%s'", - cluster.cluster_template.coe) - return None diff --git a/magnum/conductor/scale_manager.py b/magnum/conductor/scale_manager.py deleted file mode 100755 index 6ed0b187..00000000 --- a/magnum/conductor/scale_manager.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. 
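# A toy MonitorBase subclass, sketched here to show how
# compute_metric_value() dispatches through the 'func' entries of
# metrics_spec; the metric name and its implementation are invented.
class DemoMonitor(MonitorBase):

    @property
    def metrics_spec(self):
        return {'node_count': {'unit': 'node', 'func': 'compute_node_count'}}

    def pull_data(self):
        pass  # a real monitor would poll the cluster here

    def compute_node_count(self):
        return self.cluster.node_count

# DemoMonitor(context, cluster).compute_metric_value('node_count') looks up
# 'compute_node_count' in metrics_spec and calls it on the instance.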
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -from oslo_log import log as logging - -from magnum.common import exception -from magnum.drivers.common.driver import Driver -from magnum.i18n import _ -from magnum import objects - - -LOG = logging.getLogger(__name__) - - -def get_scale_manager(context, osclient, cluster): - cluster_driver = Driver.get_driver_for_cluster(context, cluster) - manager = cluster_driver.get_scale_manager(context, osclient, cluster) - if not manager: - LOG.warning( - "Currently only kubernetes and mesos cluster scale manager " - "are available") - - return manager - - -class ScaleManager(object): - - def __init__(self, context, osclient, cluster): - self.context = context - self.osclient = osclient - self.old_cluster = objects.Cluster.get_by_uuid(context, cluster.uuid) - self.new_cluster = cluster - - def get_removal_nodes(self, hosts_output): - if not self._is_scale_down(): - return list() - - cluster = self.new_cluster - stack = self.osclient.heat().stacks.get(cluster.stack_id) - hosts = hosts_output.get_output_value(stack) - if hosts is None: - raise exception.MagnumException(_( - "Output key '%(output_key)s' is missing from stack " - "%(stack_id)s") % {'output_key': hosts_output.heat_output, - 'stack_id': stack.id}) - - hosts_with_container = self._get_hosts_with_container(self.context, - cluster) - hosts_no_container = list(set(hosts) - hosts_with_container) - LOG.debug('List of hosts that has no container: %s', - str(hosts_no_container)) - - num_of_removal = self._get_num_of_removal() - if len(hosts_no_container) < num_of_removal: - LOG.warning( - "About to remove %(num_removal)d nodes, which is larger than " - "the number of empty nodes (%(num_empty)d). %(num_non_empty)d " - "non-empty nodes will be removed.", { - 'num_removal': num_of_removal, - 'num_empty': len(hosts_no_container), - 'num_non_empty': num_of_removal - len(hosts_no_container)}) - - hosts_to_remove = hosts_no_container[0:num_of_removal] - LOG.info('Require removal of hosts: %s', hosts_to_remove) - - return hosts_to_remove - - def _is_scale_down(self): - return self.new_cluster.node_count < self.old_cluster.node_count - - def _get_num_of_removal(self): - return self.old_cluster.node_count - self.new_cluster.node_count - - @abc.abstractmethod - def _get_hosts_with_container(self, context, cluster): - """Return the hosts with container running on them.""" - pass diff --git a/magnum/conductor/tasks/__init__.py b/magnum/conductor/tasks/__init__.py deleted file mode 100644 index 3f263ca0..00000000 --- a/magnum/conductor/tasks/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import taskflow.task as task - - -class OSBaseTask(task.Task): - def __init__(self, os_client, name=None, **kwargs): - self.os_client = os_client - - super(OSBaseTask, self).__init__(name=name, **kwargs) diff --git a/magnum/conductor/tasks/heat_tasks.py b/magnum/conductor/tasks/heat_tasks.py deleted file mode 100644 index 01f92d5a..00000000 --- a/magnum/conductor/tasks/heat_tasks.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.conductor import tasks - - -class CreateStack(tasks.OSBaseTask): - """CreateStack Task - - This task interfaces with Heat API and creates a stack based on parameters - provided to the Task. - - """ - - def execute(self, stack_name, parameters, template, files): - stack = self.os_client.stacks.create(stack_name=stack_name, - parameters=parameters, - template=template, files=files) - return stack - - -class UpdateStack(tasks.OSBaseTask): - """UpdateStack Task - - This task interfaces with Heat API and update a stack based on parameters - provided to the Task. - - """ - - def execute(self, stack_id, parameters, template, files): - self.os_client.stacks.update(stack_id, parameters=parameters, - template=template, files=files) - - -class DeleteStack(tasks.OSBaseTask): - """DeleteStack Task - - This task interfaces with Heat API and delete a stack based on parameters - provided to the Task. - - """ - - def execute(self, stack_id): - self.os_client.stacks.delete(stack_id) diff --git a/magnum/conductor/utils.py b/magnum/conductor/utils.py deleted file mode 100644 index f7eb2e7b..00000000 --- a/magnum/conductor/utils.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
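# A hedged sketch of wiring the Heat tasks above into a taskflow flow; the
# flow name and store keys are assumptions, and heat_client stands in for a
# heatclient instance such as osc.heat().
import taskflow.engines
from taskflow.patterns import linear_flow


def run_create_stack(heat_client, stack_name, parameters, template, files):
    flow = linear_flow.Flow('magnum-create-stack')
    flow.add(CreateStack(heat_client, name='create_stack'))
    # taskflow injects execute() arguments by name from the store.
    return taskflow.engines.run(flow, store={
        'stack_name': stack_name, 'parameters': parameters,
        'template': template, 'files': files})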
- -from oslo_utils import uuidutils -from pycadf import cadftaxonomy as taxonomy -from pycadf import cadftype -from pycadf import eventfactory -from pycadf import resource - -from magnum.common import clients -from magnum.common import rpc -from magnum.objects import cluster -from magnum.objects import cluster_template - - -def retrieve_cluster(context, cluster_ident): - if not uuidutils.is_uuid_like(cluster_ident): - return cluster.Cluster.get_by_name(context, cluster_ident) - else: - return cluster.Cluster.get_by_uuid(context, cluster_ident) - - -def retrieve_cluster_template(context, cluster): - return cluster_template.ClusterTemplate.get_by_uuid( - context, cluster.cluster_template_id) - - -def retrieve_cluster_uuid(context, cluster_ident): - if not uuidutils.is_uuid_like(cluster_ident): - cluster_obj = cluster.Cluster.get_by_name(context, cluster_ident) - return cluster_obj.uuid - else: - return cluster_ident - - -def object_has_stack(context, cluster_uuid): - osc = clients.OpenStackClients(context) - obj = retrieve_cluster(context, cluster_uuid) - - stack = osc.heat().stacks.get(obj.stack_id) - if (stack.stack_status == 'DELETE_COMPLETE' or - stack.stack_status == 'DELETE_IN_PROGRESS'): - return False - - return True - - -def _get_request_audit_info(context): - """Collect audit information about the request used for CADF. - - :param context: Request context - :returns: Auditing data about the request - :rtype: :class:'pycadf.Resource' - """ - user_id = None - project_id = None - domain_id = None - - if context: - user_id = context.user_id - project_id = context.project_id - domain_id = context.domain_id - - initiator = resource.Resource(typeURI=taxonomy.ACCOUNT_USER) - - if user_id: - initiator.user_id = user_id - - if project_id: - initiator.project_id = project_id - - if domain_id: - initiator.domain_id = domain_id - - return initiator - - -def notify_about_cluster_operation(context, action, outcome): - """Send a notification about cluster operation. - - :param action: CADF action being audited - :param outcome: CADF outcome - """ - notifier = rpc.get_notifier() - event = eventfactory.EventFactory().new_event( - eventType=cadftype.EVENTTYPE_ACTIVITY, - outcome=outcome, - action=action, - initiator=_get_request_audit_info(context), - target=resource.Resource(typeURI='service/magnum/cluster'), - observer=resource.Resource(typeURI='service/magnum/cluster')) - service = 'magnum' - event_type = '%(service)s.cluster.%(action)s' % { - 'service': service, 'action': action} - payload = event.as_dict() - - if outcome == taxonomy.OUTCOME_FAILURE: - method = notifier.error - else: - method = notifier.info - - method(context, event_type, payload) diff --git a/magnum/conf/__init__.py b/magnum/conf/__init__.py deleted file mode 100644 index 35b4cb07..00000000 --- a/magnum/conf/__init__.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2016 Fujitsu Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
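# Illustrative caller for notify_about_cluster_operation() above, using the
# standard pycadf taxonomy constants; the try/except shape is an assumption
# about how a conductor handler might use it, not original code.
from pycadf import cadftaxonomy as taxonomy


def audited_cluster_create(context, cluster, do_create):
    try:
        do_create(cluster)
        notify_about_cluster_operation(context, taxonomy.ACTION_CREATE,
                                       taxonomy.OUTCOME_SUCCESS)
    except Exception:
        notify_about_cluster_operation(context, taxonomy.ACTION_CREATE,
                                       taxonomy.OUTCOME_FAILURE)
        raise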
- -from oslo_config import cfg - -from magnum.conf import api -from magnum.conf import barbican -from magnum.conf import certificates -from magnum.conf import cinder -from magnum.conf import cluster -from magnum.conf import cluster_heat -from magnum.conf import cluster_templates -from magnum.conf import conductor -from magnum.conf import database -from magnum.conf import docker -from magnum.conf import docker_registry -from magnum.conf import glance -from magnum.conf import heat -from magnum.conf import keystone -from magnum.conf import magnum_client -from magnum.conf import neutron -from magnum.conf import nova -from magnum.conf import paths -from magnum.conf import profiler -from magnum.conf import quota -from magnum.conf import rpc -from magnum.conf import services -from magnum.conf import trust -from magnum.conf import utils -from magnum.conf import x509 - -CONF = cfg.CONF - -api.register_opts(CONF) -barbican.register_opts(CONF) -cluster.register_opts(CONF) -cluster_templates.register_opts(CONF) -cluster_heat.register_opts(CONF) -certificates.register_opts(CONF) -cinder.register_opts(CONF) -conductor.register_opts(CONF) -database.register_opts(CONF) -docker.register_opts(CONF) -docker_registry.register_opts(CONF) -glance.register_opts(CONF) -heat.register_opts(CONF) -keystone.register_opts(CONF) -magnum_client.register_opts(CONF) -neutron.register_opts(CONF) -nova.register_opts(CONF) -paths.register_opts(CONF) -quota.register_opts(CONF) -rpc.register_opts(CONF) -services.register_opts(CONF) -trust.register_opts(CONF) -utils.register_opts(CONF) -x509.register_opts(CONF) -profiler.register_opts(CONF) diff --git a/magnum/conf/api.py b/magnum/conf/api.py deleted file mode 100644 index 0df0da88..00000000 --- a/magnum/conf/api.py +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -api_group = cfg.OptGroup(name='api', - title='Options for the magnum-api service') - -api_service_opts = [ - cfg.PortOpt('port', - default=9511, - help='The port for the Magnum API server.'), - cfg.IPOpt('host', - default='127.0.0.1', - help='The listen IP for the Magnum API server.'), - cfg.IntOpt('max_limit', - default=1000, - help='The maximum number of items returned in a single ' - 'response from a collection resource.'), - cfg.StrOpt('api_paste_config', - default="api-paste.ini", - help="Configuration file for WSGI definition of API." - ), - cfg.StrOpt('ssl_cert_file', - help="This option allows setting path to the SSL certificate " - "of API server. "), - cfg.StrOpt('ssl_key_file', - help="This option specifies the path to the file where SSL " - "private key of API server is stored when SSL is in " - "effect. "), - cfg.BoolOpt('enabled_ssl', - default=False, - help='Enable SSL Magnum API service'), - cfg.IntOpt('workers', - help='The maximum number of magnum-api processes to ' - 'fork and run. 
Default to number of CPUs on the host.') -] - - -def register_opts(conf): - conf.register_group(api_group) - conf.register_opts(api_service_opts, group=api_group) - - -def list_opts(): - return { - api_group: api_service_opts - } diff --git a/magnum/conf/barbican.py b/magnum/conf/barbican.py deleted file mode 100644 index da976721..00000000 --- a/magnum/conf/barbican.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from magnum.i18n import _ - -barbican_group = cfg.OptGroup(name='barbican_client', - title='Options for the Barbican client') - -barbican_client_opts = [ - cfg.StrOpt('region_name', - help=_('Region in Identity service catalog to use for ' - 'communication with the OpenStack service.')), - cfg.StrOpt('endpoint_type', - default='publicURL', - help=_('Type of endpoint in Identity service catalog to use ' - 'for communication with the OpenStack service.'))] - - -def register_opts(conf): - conf.register_group(barbican_group) - conf.register_opts(barbican_client_opts, group=barbican_group) - - -def list_opts(): - return { - barbican_group: barbican_client_opts - } diff --git a/magnum/conf/certificates.py b/magnum/conf/certificates.py deleted file mode 100644 index ccad6e1a..00000000 --- a/magnum/conf/certificates.py +++ /dev/null @@ -1,51 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import itertools -from oslo_config import cfg - -DEFAULT_CERT_MANAGER = 'barbican' -TLS_STORAGE_DEFAULT = '/var/lib/magnum/certificates/' - -certificates_group = cfg.OptGroup(name='certificates', - title='Certificate options for the ' - 'cert manager.') - -cert_manager_opts = [ - cfg.StrOpt('cert_manager_type', - default=DEFAULT_CERT_MANAGER, - help='Certificate Manager plugin. ' - 'Defaults to {0}.'.format(DEFAULT_CERT_MANAGER)) -] - -local_cert_manager_opts = [ - cfg.StrOpt('storage_path', - default=TLS_STORAGE_DEFAULT, - help='Absolute path of the certificate storage directory. 
' - 'Defaults to /var/lib/magnum/certificates/.') -] - -ALL_OPTS = list(itertools.chain( - cert_manager_opts, - local_cert_manager_opts -)) - - -def register_opts(conf): - conf.register_group(certificates_group) - conf.register_opts(ALL_OPTS, group=certificates_group) - - -def list_opts(): - return { - certificates_group: ALL_OPTS - } diff --git a/magnum/conf/cinder.py b/magnum/conf/cinder.py deleted file mode 100644 index f765663e..00000000 --- a/magnum/conf/cinder.py +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from magnum.i18n import _ - -cinder_group = cfg.OptGroup( - name='cinder', - title='Options for the Cinder configuration') - -cinder_client_group = cfg.OptGroup( - name='cinder_client', - title='Options for the Cinder client') - -cinder_opts = [ - cfg.StrOpt('default_docker_volume_type', - help=_('The default docker volume_type to use for volumes ' - 'used for docker storage. To use the cinder volumes ' - 'for docker storage, you need to select a default ' - 'value.'))] - -cinder_client_opts = [ - cfg.StrOpt('region_name', - help=_('Region in Identity service catalog to use for ' - 'communication with the OpenStack service.'))] - - -def register_opts(conf): - conf.register_group(cinder_group) - conf.register_group(cinder_client_group) - conf.register_opts(cinder_opts, group=cinder_group) - conf.register_opts(cinder_client_opts, group=cinder_client_group) - - -def list_opts(): - return { - cinder_group: cinder_opts, - cinder_client_group: cinder_client_opts - } diff --git a/magnum/conf/cluster.py b/magnum/conf/cluster.py deleted file mode 100644 index 83b38bc3..00000000 --- a/magnum/conf/cluster.py +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from magnum.i18n import _ - -cluster_group = cfg.OptGroup(name='cluster', - title='Options for Cluster configuration') - -cluster_def_opts = [ - cfg.StrOpt('etcd_discovery_service_endpoint_format', - default='https://discovery.etcd.io/new?size=%(size)d', - help=_('Url for etcd public discovery endpoint.'), - deprecated_group='bay'), - cfg.ListOpt('enabled_definitions', - deprecated_for_removal=True, - deprecated_reason=_('This configuration option is no longer ' - 'used. 
Installing a new driver enables ' - 'it for use automatically.'), - default=['magnum_vm_atomic_k8s', 'magnum_bm_fedora_k8s', - 'magnum_vm_coreos_k8s', 'magnum_vm_atomic_swarm', - 'magnum_vm_ubuntu_mesos'], - help=_('Enabled cluster definition entry points.'), - deprecated_group='bay'), -] - - -def register_opts(conf): - conf.register_group(cluster_group) - conf.register_opts(cluster_def_opts, group=cluster_group) - - -def list_opts(): - return { - cluster_group: cluster_def_opts - } diff --git a/magnum/conf/cluster_heat.py b/magnum/conf/cluster_heat.py deleted file mode 100644 index 07eb9e52..00000000 --- a/magnum/conf/cluster_heat.py +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -cluster_heat_group = cfg.OptGroup(name='cluster_heat', - title='Heat options for Cluster ' - 'configuration') - -cluster_heat_opts = [ - cfg.IntOpt('max_attempts', - default=2000, - help=('Number of attempts to query the Heat stack for ' - 'finding out the status of the created stack and ' - 'getting template outputs. This value is ignored ' - 'during cluster creation if timeout is set as the poll ' - 'will continue until cluster creation either ends ' - 'or times out.'), - deprecated_group='bay_heat'), - cfg.IntOpt('wait_interval', - default=1, - help=('Sleep time interval between two attempts of querying ' - 'the Heat stack. This interval is in seconds.'), - deprecated_group='bay_heat'), - cfg.IntOpt('create_timeout', - default=60, - help=('The length of time to let cluster creation continue. ' - 'This interval is in minutes. The default is 60 minutes.' - ), - deprecated_group='bay_heat', - deprecated_name='bay_create_timeout') -] - - -def register_opts(conf): - conf.register_group(cluster_heat_group) - conf.register_opts(cluster_heat_opts, group=cluster_heat_group) - - -def list_opts(): - return { - cluster_heat_group: cluster_heat_opts - } diff --git a/magnum/conf/cluster_templates.py b/magnum/conf/cluster_templates.py deleted file mode 100644 index e48aea12..00000000 --- a/magnum/conf/cluster_templates.py +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from magnum.i18n import _ - -cluster_template_group = cfg.OptGroup(name='cluster_template', - title='Options for cluster_template') - -cluster_template_opts = [ - cfg.ListOpt('kubernetes_allowed_network_drivers', - default=['all'], - help=_("Allowed network drivers for kubernetes " - "cluster-templates. 
Use 'all' keyword to allow all " - "drivers supported for kubernetes cluster-templates. " - "Supported network drivers include flannel."), - deprecated_group='baymodel'), - cfg.StrOpt('kubernetes_default_network_driver', - default='flannel', - help=_("Default network driver for kubernetes " - "cluster-templates."), - deprecated_group='baymodel'), - cfg.ListOpt('swarm_allowed_network_drivers', - default=['all'], - help=_("Allowed network drivers for docker swarm " - "cluster-templates. Use 'all' keyword to allow all " - "drivers supported for swarm cluster-templates. " - "Supported network drivers include docker and flannel." - ), - deprecated_group='baymodel'), - cfg.StrOpt('swarm_default_network_driver', - default='docker', - help=_("Default network driver for docker swarm " - "cluster-templates."), - deprecated_group='baymodel'), - cfg.ListOpt('mesos_allowed_network_drivers', - default=['all'], - help=_("Allowed network drivers for mesos cluster-templates. " - "Use 'all' keyword to allow all drivers supported " - "for mesos cluster-templates. Supported network " - "drivers include docker."), - deprecated_group='baymodel'), - cfg.StrOpt('mesos_default_network_driver', - default='docker', - help=_("Default network driver for mesos cluster-templates."), - deprecated_group='baymodel'), -] - - -def register_opts(conf): - conf.register_group(cluster_template_group) - conf.register_opts(cluster_template_opts, group=cluster_template_group) - - -def list_opts(): - return { - cluster_template_group: cluster_template_opts - } diff --git a/magnum/conf/conductor.py b/magnum/conf/conductor.py deleted file mode 100644 index 410785d5..00000000 --- a/magnum/conf/conductor.py +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -conductor_group = cfg.OptGroup(name='conductor', - title='Options for the magnum-conductor ' - 'service') - -conductor_service_opts = [ - cfg.StrOpt('topic', - default='magnum-conductor', - help='The queue to add conductor tasks to.'), - cfg.IntOpt('conductor_life_check_timeout', - default=4, - help=('RPC timeout for the conductor liveness check that is ' - 'used for cluster locking.')), - cfg.IntOpt('workers', - help='Number of magnum-conductor processes to fork and run. ' - 'Default to number of CPUs on the host.') -] - - -def register_opts(conf): - conf.register_group(conductor_group) - conf.register_opts(conductor_service_opts, group=conductor_group) - - -def list_opts(): - return { - conductor_group: conductor_service_opts - } diff --git a/magnum/conf/database.py b/magnum/conf/database.py deleted file mode 100644 index c0568bac..00000000 --- a/magnum/conf/database.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. 
You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg -from oslo_db import options - -from magnum.conf import paths - - -_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('magnum.sqlite') - -database_group = cfg.OptGroup(name='database', - title='Options for Magnum Database') - -sql_opts = [ - cfg.StrOpt('mysql_engine', - default='InnoDB', - help='MySQL engine to use.') -] - - -def register_opts(conf): - conf.register_group(database_group) - conf.register_opts(sql_opts, group=database_group) - options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION) - - -def list_opts(): - return { - database_group: sql_opts - } diff --git a/magnum/conf/docker.py b/magnum/conf/docker.py deleted file mode 100644 index fc65c069..00000000 --- a/magnum/conf/docker.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -docker_group = cfg.OptGroup(name='docker', - title='Options for Docker engine') - -docker_opts = [ - cfg.StrOpt('docker_remote_api_version', - default='1.21', - help='Docker remote api version. Override it according to ' - 'specific docker api version in your environment.'), - cfg.IntOpt('default_timeout', - default=60, - help='Default timeout in seconds for docker client ' - 'operations.'), - cfg.BoolOpt('api_insecure', - default=False, - help='If set, ignore any SSL validation issues'), - cfg.StrOpt('ca_file', - help='Location of CA certificates file for ' - 'securing docker api requests (tlscacert).'), - cfg.StrOpt('cert_file', - help='Location of TLS certificate file for ' - 'securing docker api requests (tlscert).'), - cfg.StrOpt('key_file', - help='Location of TLS private key file for ' - 'securing docker api requests (tlskey).'), -] - - -def register_opts(conf): - conf.register_group(docker_group) - conf.register_opts(docker_opts, group=docker_group) - - -def list_opts(): - return { - docker_group: docker_opts - } diff --git a/magnum/conf/docker_registry.py b/magnum/conf/docker_registry.py deleted file mode 100644 index 36b27f55..00000000 --- a/magnum/conf/docker_registry.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
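# A sketch (docker-py 2.x style, not original magnum code) of turning the
# [docker] options above into a client; 'docker_url' is a hypothetical
# parameter such as a swarm master endpoint.
import docker

import magnum.conf

CONF = magnum.conf.CONF


def make_docker_client(docker_url):
    tls_config = None
    if CONF.docker.cert_file or CONF.docker.ca_file:
        tls_config = docker.tls.TLSConfig(
            client_cert=(CONF.docker.cert_file, CONF.docker.key_file),
            ca_cert=CONF.docker.ca_file,
            verify=not CONF.docker.api_insecure)
    return docker.APIClient(base_url=docker_url,
                            version=CONF.docker.docker_remote_api_version,
                            timeout=CONF.docker.default_timeout,
                            tls=tls_config)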
-# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from magnum.i18n import _ - -docker_registry_group = cfg.OptGroup(name='docker_registry', - title='Options for Docker Registry') - -docker_registry_opts = [ - cfg.StrOpt('swift_region', - help=_('Region name of Swift')), - cfg.StrOpt('swift_registry_container', - default='docker_registry', - help=_('Name of the container in Swift which docker registry ' - 'stores images in')) -] - - -def register_opts(conf): - conf.register_group(docker_registry_group) - conf.register_opts(docker_registry_opts, group=docker_registry_group) - - -def list_opts(): - return { - docker_registry_group: docker_registry_opts - } diff --git a/magnum/conf/glance.py b/magnum/conf/glance.py deleted file mode 100644 index b45f397d..00000000 --- a/magnum/conf/glance.py +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import itertools - -from oslo_config import cfg - -from magnum.i18n import _ - -glance_group = cfg.OptGroup(name='glance_client', - title='Options for the Glance client') - -glance_client_opts = [ - cfg.StrOpt('region_name', - help=_('Region in Identity service catalog to use for ' - 'communication with the OpenStack service.')), - cfg.StrOpt('endpoint_type', - default='publicURL', - help=_('Type of endpoint in Identity service catalog to use ' - 'for communication with the OpenStack service.')), - cfg.StrOpt('api_version', - default='2', - help=_('Version of Glance API to use in glanceclient.'))] - -common_security_opts = [ - cfg.StrOpt('ca_file', - help=_('Optional CA cert file to use in SSL connections.')), - cfg.StrOpt('cert_file', - help=_('Optional PEM-formatted certificate chain file.')), - cfg.StrOpt('key_file', - help=_('Optional PEM-formatted file that contains the ' - 'private key.')), - cfg.BoolOpt('insecure', - default=False, - help=_("If set, then the server's certificate will not " - "be verified."))] - -ALL_OPTS = list(itertools.chain( - glance_client_opts, - common_security_opts -)) - - -def register_opts(conf): - conf.register_group(glance_group) - conf.register_opts(ALL_OPTS, group=glance_group) - - -def list_opts(): - return { - glance_group: ALL_OPTS - } diff --git a/magnum/conf/heat.py b/magnum/conf/heat.py deleted file mode 100644 index 9f41d58b..00000000 --- a/magnum/conf/heat.py +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
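# A self-contained sketch of the register_opts()/list_opts() contract that
# every magnum.conf module follows, shown with the glance module above;
# only standard oslo.config calls, with defaults checked as an illustration.
from oslo_config import cfg

from magnum.conf import glance

conf = cfg.ConfigOpts()
glance.register_opts(conf)
assert conf.glance_client.api_version == '2'
assert conf.glance_client.insecure is False

for group, opts in glance.list_opts().items():
    print(group.name, sorted(opt.name for opt in opts))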
- -import itertools - -from oslo_config import cfg - -from magnum.i18n import _ - -heat_group = cfg.OptGroup(name='heat_client', - title='Options for the Heat client') - -heat_client_opts = [ - cfg.StrOpt('region_name', - help=_('Region in Identity service catalog to use for ' - 'communication with the OpenStack service.')), - cfg.StrOpt('endpoint_type', - default='publicURL', - help=_('Type of endpoint in Identity service catalog to use ' - 'for communication with the OpenStack service.')), - cfg.StrOpt('api_version', - default='1', - help=_('Version of Heat API to use in heatclient.'))] - -common_security_opts = [ - cfg.StrOpt('ca_file', - help=_('Optional CA cert file to use in SSL connections.')), - cfg.StrOpt('cert_file', - help=_('Optional PEM-formatted certificate chain file.')), - cfg.StrOpt('key_file', - help=_('Optional PEM-formatted file that contains the ' - 'private key.')), - cfg.BoolOpt('insecure', - default=False, - help=_("If set, then the server's certificate will not " - "be verified."))] - -ALL_OPTS = list(itertools.chain( - heat_client_opts, - common_security_opts -)) - - -def register_opts(conf): - conf.register_group(heat_group) - conf.register_opts(ALL_OPTS, group=heat_group) - - -def list_opts(): - return { - heat_group: ALL_OPTS - } diff --git a/magnum/conf/keystone.py b/magnum/conf/keystone.py deleted file mode 100644 index 74b582fb..00000000 --- a/magnum/conf/keystone.py +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
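# Hedged example of consuming the [heat_client] options above to build a
# heatclient; the endpoint/token plumbing is an assumption (magnum itself
# resolves these through its OpenStackClients wrapper).
from heatclient import client as heat_client

import magnum.conf

CONF = magnum.conf.CONF


def make_heat_client(endpoint, token):
    return heat_client.Client(CONF.heat_client.api_version,
                              endpoint=endpoint, token=token,
                              ca_file=CONF.heat_client.ca_file,
                              cert_file=CONF.heat_client.cert_file,
                              key_file=CONF.heat_client.key_file,
                              insecure=CONF.heat_client.insecure)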
- -from keystoneauth1 import loading as ka_loading -from oslo_config import cfg - -CFG_GROUP = 'keystone_auth' -CFG_LEGACY_GROUP = 'keystone_authtoken' - -legacy_session_opts = { - 'certfile': [cfg.DeprecatedOpt('certfile', CFG_LEGACY_GROUP)], - 'keyfile': [cfg.DeprecatedOpt('keyfile', CFG_LEGACY_GROUP)], - 'cafile': [cfg.DeprecatedOpt('cafile', CFG_LEGACY_GROUP)], - 'insecure': [cfg.DeprecatedOpt('insecure', CFG_LEGACY_GROUP)], - 'timeout': [cfg.DeprecatedOpt('timeout', CFG_LEGACY_GROUP)], -} - -keystone_auth_group = cfg.OptGroup(name=CFG_GROUP, - title='Options for Keystone in Magnum') - - -def register_opts(conf): - # FIXME(pauloewerton): remove import of authtoken group and legacy options - # after deprecation period - conf.import_group(CFG_LEGACY_GROUP, 'keystonemiddleware.auth_token') - ka_loading.register_auth_conf_options(conf, CFG_GROUP) - ka_loading.register_session_conf_options( - conf, CFG_GROUP, deprecated_opts=legacy_session_opts) - conf.set_default('auth_type', default='password', group=CFG_GROUP) - - -def list_opts(): - keystone_auth_opts = (ka_loading.get_auth_common_conf_options() + - ka_loading.get_auth_plugin_conf_options('password')) - return { - keystone_auth_group: keystone_auth_opts - } diff --git a/magnum/conf/magnum_client.py b/magnum/conf/magnum_client.py deleted file mode 100644 index 4a2963a4..00000000 --- a/magnum/conf/magnum_client.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from magnum.i18n import _ - -magnum_client_group = cfg.OptGroup(name='magnum_client', - title='Options for the Magnum client') - -magnum_client_opts = [ - cfg.StrOpt('region_name', - help=_('Region in Identity service catalog to use for ' - 'communication with the OpenStack service.')), - cfg.StrOpt('endpoint_type', - default='publicURL', - help=_('Type of endpoint in Identity service catalog to use ' - 'for communication with the OpenStack service.'))] - - -def register_opts(conf): - conf.register_group(magnum_client_group) - conf.register_opts(magnum_client_opts, group=magnum_client_group) - - -def list_opts(): - return { - magnum_client_group: magnum_client_opts - } diff --git a/magnum/conf/neutron.py b/magnum/conf/neutron.py deleted file mode 100644 index c5f79c1d..00000000 --- a/magnum/conf/neutron.py +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
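# Sketch of how the [keystone_auth] options registered above are typically
# consumed, via the standard keystoneauth1 loading helpers; 'conf' is
# assumed to be a ConfigOpts on which register_opts() has already run.
from keystoneauth1 import loading as ka_loading


def make_keystone_session(conf):
    auth = ka_loading.load_auth_from_conf_options(conf, CFG_GROUP)
    return ka_loading.load_session_from_conf_options(conf, CFG_GROUP,
                                                     auth=auth)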
- -import itertools - -from oslo_config import cfg - -from magnum.i18n import _ - -neutron_group = cfg.OptGroup(name='neutron_client', - title='Options for the neutron client') - -neutron_client_opts = [ - cfg.StrOpt('region_name', - help=_('Region in Identity service catalog to use for ' - 'communication with the OpenStack service.')), - cfg.StrOpt('endpoint_type', - default='publicURL', - help=_('Type of endpoint in Identity service catalog to use ' - 'for communication with the OpenStack service.'))] - -common_security_opts = [ - cfg.StrOpt('ca_file', - help=_('Optional CA cert file to use in SSL connections.')), - cfg.StrOpt('cert_file', - help=_('Optional PEM-formatted certificate chain file.')), - cfg.StrOpt('key_file', - help=_('Optional PEM-formatted file that contains the ' - 'private key.')), - cfg.BoolOpt('insecure', - default=False, - help=_("If set, then the server's certificate will not " - "be verified."))] - -ALL_OPTS = list(itertools.chain( - neutron_client_opts, - common_security_opts -)) - - -def register_opts(conf): - conf.register_group(neutron_group) - conf.register_opts(ALL_OPTS, group=neutron_group) - - -def list_opts(): - return { - neutron_group: ALL_OPTS - } diff --git a/magnum/conf/nova.py b/magnum/conf/nova.py deleted file mode 100644 index be85ce8f..00000000 --- a/magnum/conf/nova.py +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import itertools - -from oslo_config import cfg - -from magnum.i18n import _ - -nova_group = cfg.OptGroup(name='nova_client', - title='Options for the nova client') - -nova_client_opts = [ - cfg.StrOpt('region_name', - help=_('Region in Identity service catalog to use for ' - 'communication with the OpenStack service.')), - cfg.StrOpt('endpoint_type', - default='publicURL', - help=_('Type of endpoint in Identity service catalog to use ' - 'for communication with the OpenStack service.')), - cfg.StrOpt('api_version', - default='2', - help=_('Version of Nova API to use in novaclient.'))] - -common_security_opts = [ - cfg.StrOpt('ca_file', - help=_('Optional CA cert file to use in SSL connections.')), - cfg.StrOpt('cert_file', - help=_('Optional PEM-formatted certificate chain file.')), - cfg.StrOpt('key_file', - help=_('Optional PEM-formatted file that contains the ' - 'private key.')), - cfg.BoolOpt('insecure', - default=False, - help=_("If set, then the server's certificate will not " - "be verified."))] - -ALL_OPTS = list(itertools.chain( - nova_client_opts, - common_security_opts -)) - - -def register_opts(conf): - conf.register_group(nova_group) - conf.register_opts(ALL_OPTS, group=nova_group) - - -def list_opts(): - return { - nova_group: ALL_OPTS - } diff --git a/magnum/conf/opts.py b/magnum/conf/opts.py deleted file mode 100644 index 64e6da15..00000000 --- a/magnum/conf/opts.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. 
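# Hedged sketch pairing the [nova_client] options above with a
# keystoneauth1 session (see the keystone sketch earlier); standard
# novaclient API, but the wiring itself is an assumption.
from novaclient import client as nova_client

import magnum.conf

CONF = magnum.conf.CONF


def make_nova_client(session):
    return nova_client.Client(CONF.nova_client.api_version,
                              session=session,
                              region_name=CONF.nova_client.region_name,
                              endpoint_type=CONF.nova_client.endpoint_type)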
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -This is the single point of entry to generate the sample configuration -file for Magnum. It collects all the necessary info from the other modules -in this package. It is assumed that: - -* every other module in this package has a 'list_opts' function which - return a dict where - * the keys are strings which are the group names - * the value of each key is a list of config options for that group -* the magnum.conf package doesn't have further packages with config options -* this module is only used in the context of sample file generation -""" - -import collections -import importlib -import os -import pkgutil - -LIST_OPTS_FUNC_NAME = "list_opts" - - -def _tupleize(dct): - """Take the dict of options and convert to the 2-tuple format.""" - return [(key, val) for key, val in dct.items()] - - -def list_opts(): - opts = collections.defaultdict(list) - module_names = _list_module_names() - imported_modules = _import_modules(module_names) - _append_config_options(imported_modules, opts) - return _tupleize(opts) - - -def _list_module_names(): - module_names = [] - package_path = os.path.dirname(os.path.abspath(__file__)) - for _, modname, ispkg in pkgutil.iter_modules(path=[package_path]): - if modname == "opts" or ispkg: - continue - else: - module_names.append(modname) - return module_names - - -def _import_modules(module_names): - imported_modules = [] - for modname in module_names: - mod = importlib.import_module("magnum.conf." + modname) - if not hasattr(mod, LIST_OPTS_FUNC_NAME): - msg = "The module 'magnum.conf.%s' should have a '%s' "\ - "function which returns the config options." % \ - (modname, LIST_OPTS_FUNC_NAME) - raise AttributeError(msg) - else: - imported_modules.append(mod) - return imported_modules - - -def _append_config_options(imported_modules, config_options): - for mod in imported_modules: - configs = mod.list_opts() - for key, val in configs.items(): - config_options[key].extend(val) diff --git a/magnum/conf/paths.py b/magnum/conf/paths.py deleted file mode 100644 index e9b6dc94..00000000 --- a/magnum/conf/paths.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
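# Illustrative driver for the list_opts() entry point defined in opts.py
# above: print every group and its option names, which is roughly what
# oslo-config-generator does when building magnum.conf.sample.
from magnum.conf import opts as conf_opts

for group, opt_list in conf_opts.list_opts():
    label = group if isinstance(group, str) else group.name
    print(label, sorted(opt.name for opt in opt_list))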
- -import os - -from oslo_config import cfg - -path_opts = [ - cfg.StrOpt('pybasedir', - default=os.path.abspath(os.path.join(os.path.dirname(__file__), - '../')), - help='Directory where the magnum python module is installed.'), - cfg.StrOpt('bindir', - default='$pybasedir/bin', - help='Directory where magnum binaries are installed.'), - cfg.StrOpt('state_path', - default='$pybasedir', - help="Top-level directory for maintaining magnum's state."), -] - - -def basedir_def(*args): - """Return an uninterpolated path relative to $pybasedir.""" - return os.path.join('$pybasedir', *args) - - -def bindir_def(*args): - """Return an uninterpolated path relative to $bindir.""" - return os.path.join('$bindir', *args) - - -def state_path_def(*args): - """Return an uninterpolated path relative to $state_path.""" - return os.path.join('$state_path', *args) - - -def register_opts(conf): - conf.register_opts(path_opts) - - -def list_opts(): - return { - "DEFAULT": path_opts - } diff --git a/magnum/conf/profiler.py b/magnum/conf/profiler.py deleted file mode 100644 index d0f68c0f..00000000 --- a/magnum/conf/profiler.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_utils import importutils - - -profiler_opts = importutils.try_import('osprofiler.opts') - - -def register_opts(conf): - if profiler_opts: - profiler_opts.set_defaults(conf) - - -def list_opts(): - return { - profiler_opts._profiler_opt_group: profiler_opts._PROFILER_OPTS - } diff --git a/magnum/conf/quota.py b/magnum/conf/quota.py deleted file mode 100644 index d1609767..00000000 --- a/magnum/conf/quota.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from magnum.i18n import _ - -quotas_group = cfg.OptGroup(name='quotas', - title='Options for quota configuration') - -quotas_def_opts = [ - cfg.IntOpt('max_clusters_per_project', - default=20, - help=_('Max number of clusters allowed per project. 
Admin can ' - 'override this default quota for a project by setting an ' - 'explicit limit in the quotas DB table (using the /quotas ' - 'REST API endpoint).')), -] - - -def register_opts(conf): - conf.register_group(quotas_group) - conf.register_opts(quotas_def_opts, group=quotas_group) - - -def list_opts(): - return { - quotas_group: quotas_def_opts - } diff --git a/magnum/conf/rpc.py b/magnum/conf/rpc.py deleted file mode 100644 index 0bc1bb59..00000000 --- a/magnum/conf/rpc.py +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - - -periodic_opts = [ - cfg.BoolOpt('periodic_global_stack_list', - default=False, - help="List Heat stacks globally when syncing clusters. " - "Default is to retrieve each cluster's stack " - "individually. Reduces the number of requests against " - "the Heat API if enabled, but requires changes to " - "Heat's policy.json."), - cfg.BoolOpt('periodic_enable', - default=True, - help='Enable periodic tasks.'), - cfg.IntOpt('periodic_interval_max', - default=60, - help='Max interval size between periodic tasks execution in ' - 'seconds.'), -] - - -def register_opts(conf): - conf.register_opts(periodic_opts) - - -def list_opts(): - return { - "DEFAULT": periodic_opts - } diff --git a/magnum/conf/services.py b/magnum/conf/services.py deleted file mode 100644 index 9c8f4be6..00000000 --- a/magnum/conf/services.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from magnum.i18n import _ - -service_opts = [ - cfg.StrOpt('host', - help=_('Name of this node. This can be an opaque identifier. ' - 'It is not necessarily a hostname, FQDN, or IP address. ' - 'However, the node name must be valid within ' - 'an AMQP key, and if using ZeroMQ, a valid ' - 'hostname, FQDN, or IP address.')), -] - - -def register_opts(conf): - conf.register_opts(service_opts) - - -def list_opts(): - return { - "DEFAULT": service_opts - } diff --git a/magnum/conf/trust.py b/magnum/conf/trust.py deleted file mode 100644 index eb5c826e..00000000 --- a/magnum/conf/trust.py +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License.
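Note: quota.py, rpc.py, and services.py above all follow the same register_opts()/list_opts() pairing. A sketch of how one of these modules is wired up at runtime, using the quota options as the concrete case; the group and option names come from the diff, while the standalone ConfigOpts instance (rather than Magnum's global CONF) is for illustration only:

```python
# Registering the deleted quota.py options on a throwaway ConfigOpts
# instance; in Magnum proper this happens against the global CONF.
from oslo_config import cfg

from magnum.conf import quota

conf = cfg.ConfigOpts()
quota.register_opts(conf)  # registers the [quotas] group and its options
conf([])                   # parse an empty argv so defaults take effect

assert conf.quotas.max_clusters_per_project == 20  # default from the diff
```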
You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from magnum.i18n import _ - -trust_group = cfg.OptGroup(name='trust', - title='Trustee options for the magnum services') - -trust_opts = [ - cfg.BoolOpt('cluster_user_trust', - default=False, - help=_('This setting controls whether to assign a trust to' - ' the cluster user or not. You will need to set it to' - ' True for clusters with volume_driver=cinder or' - ' registry_enabled=true in the underlying cluster' - ' template to work. This is a potential security risk' - ' since the trust gives instances OpenStack API access' - " to the cluster's project. Note that this setting" - ' does not affect per-cluster trusts assigned to the ' - 'Magnum service user.')), - cfg.StrOpt('trustee_domain_id', - help=_('Id of the domain to create trustee for clusters')), - cfg.StrOpt('trustee_domain_name', - help=_('Name of the domain to create trustee for clusters')), - cfg.StrOpt('trustee_domain_admin_id', - help=_('Id of the admin with roles sufficient to manage users' - ' in the trustee_domain')), - cfg.StrOpt('trustee_domain_admin_name', - help=_('Name of the admin with roles sufficient to manage users' - ' in the trustee_domain')), - cfg.StrOpt('trustee_domain_admin_domain_id', - help=_('Id of the domain admin user\'s domain.' - ' trustee_domain_id is used by default')), - cfg.StrOpt('trustee_domain_admin_domain_name', - help=_('Name of the domain admin user\'s domain.' - ' trustee_domain_name is used by default')), - cfg.StrOpt('trustee_domain_admin_password', secret=True, - help=_('Password of trustee_domain_admin')), - cfg.ListOpt('roles', - default=[], - help=_('The roles which are delegated to the trustee ' - 'by the trustor')), - cfg.StrOpt('trustee_keystone_interface', - default='public', - help=_('Auth interface used by instances/trustee')) -] - - -def register_opts(conf): - conf.register_group(trust_group) - conf.register_opts(trust_opts, group=trust_group) - - -def list_opts(): - return { - trust_group: trust_opts - } diff --git a/magnum/conf/utils.py b/magnum/conf/utils.py deleted file mode 100644 index 2165f5a4..00000000 --- a/magnum/conf/utils.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import itertools - -from oslo_config import cfg - -from magnum.i18n import _ - -# Default symbols to use for passwords. Avoids visually confusing characters.
-# ~6 bits per symbol -DEFAULT_PASSWORD_SYMBOLS = ['23456789', # Removed: 0,1 - 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O - 'abcdefghijkmnopqrstuvwxyz'] # Removed: l - -utils_opts = [ - cfg.StrOpt('rootwrap_config', - default="/etc/magnum/rootwrap.conf", - help='Path to the rootwrap configuration file to use for ' - 'running commands as root.'), - cfg.StrOpt('tempdir', - help='Explicitly specify the temporary working directory.'), - cfg.ListOpt('password_symbols', - default=DEFAULT_PASSWORD_SYMBOLS, - help='Symbols to use for passwords') -] - -periodic_opts = [ - cfg.IntOpt('service_down_time', - default=180, - help='Maximum time since last check-in for a service to be ' - 'considered up, in seconds.'), -] - -urlfetch_opts = [ - cfg.IntOpt('max_manifest_size', - default=524288, - help=_('Maximum raw byte size of any manifest.')) -] - -ALL_OPTS = list(itertools.chain( - utils_opts, - periodic_opts, - urlfetch_opts -)) - - -def register_opts(conf): - conf.register_opts(ALL_OPTS) - - -def list_opts(): - return { - "DEFAULT": ALL_OPTS - } diff --git a/magnum/conf/x509.py b/magnum/conf/x509.py deleted file mode 100644 index f7ef578a..00000000 --- a/magnum/conf/x509.py +++ /dev/null @@ -1,62 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from magnum.common.x509 import extensions -from magnum.i18n import _ - -ALLOWED_EXTENSIONS = ['"%s"' % e.value for e in extensions.Extensions] -DEFAULT_ALLOWED_EXTENSIONS = [ - extensions.Extensions.KEY_USAGE.value, - extensions.Extensions.EXTENDED_KEY_USAGE.value, - extensions.Extensions.SUBJECT_ALTERNATIVE_NAME.value, - extensions.Extensions.BASIC_CONSTRAINTS.value, - extensions.Extensions.SUBJECT_KEY_IDENTIFIER.value] - -ALLOWED_KEY_USAGE = ['"%s"' % e.value[0] for e in extensions.KeyUsages] -DEFAULT_ALLOWED_KEY_USAGE = [ - extensions.KeyUsages.DIGITAL_SIGNATURE.value[0], - extensions.KeyUsages.KEY_ENCIPHERMENT.value[0], - extensions.KeyUsages.CONTENT_COMMITMENT.value[0]] - -x509_group = cfg.OptGroup(name='x509', - title='Options for X509 in Magnum') - -x509_opts = [ - cfg.BoolOpt('allow_ca', - default=False, - help=_('Certificate can get the CA flag in x509 extensions.')), - cfg.ListOpt('allowed_extensions', - default=DEFAULT_ALLOWED_EXTENSIONS, - help=_('List of allowed x509 extensions. Available values: ' - '%s') % ', '.join(ALLOWED_EXTENSIONS)), - cfg.ListOpt('allowed_key_usage', - default=DEFAULT_ALLOWED_KEY_USAGE, - help=_('List of allowed x509 key usage. Available values: ' - '%s') % ', '.join(ALLOWED_KEY_USAGE)), - cfg.IntOpt('term_of_validity', - default=365 * 5, - help=_('Number of days for which a certificate is valid.')), - cfg.IntOpt('rsa_key_size', - default=2048, help=_('Size of generated private key.
'))] - - -def register_opts(conf): - conf.register_group(x509_group) - conf.register_opts(x509_opts, group=x509_group) - - -def list_opts(): - return { - x509_group: x509_opts - } diff --git a/magnum/db/__init__.py b/magnum/db/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/db/api.py b/magnum/db/api.py deleted file mode 100644 index b2878de1..00000000 --- a/magnum/db/api.py +++ /dev/null @@ -1,446 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Base classes for storage engines -""" - -import abc - -from oslo_config import cfg -from oslo_db import api as db_api -import six - -from magnum.common import profiler - - -_BACKEND_MAPPING = {'sqlalchemy': 'magnum.db.sqlalchemy.api'} -IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, - lazy=True) - - -def get_instance(): - """Return a DB API instance.""" - return IMPL - - -@profiler.trace_cls("db") -@six.add_metaclass(abc.ABCMeta) -class Connection(object): - """Base class for storage system connections.""" - - @abc.abstractmethod - def __init__(self): - """Constructor.""" - - @abc.abstractmethod - def get_cluster_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None): - """Get matching clusters. - - Return a list of the specified columns for all clusters that match the - specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - - :param limit: Maximum number of clusters to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_cluster(self, values): - """Create a new cluster. - - :param values: A dict containing several items used to identify - and track the cluster, and several dicts which are - passed into the Drivers when managing this cluster. - For example: - - :: - - { - 'uuid': uuidutils.generate_uuid(), - 'name': 'example', - 'type': 'virt' - } - :returns: A cluster. - """ - - @abc.abstractmethod - def get_cluster_by_id(self, context, cluster_id): - """Return a cluster. - - :param context: The security context - :param cluster_id: The id of a cluster. - :returns: A cluster. - """ - - @abc.abstractmethod - def get_cluster_by_uuid(self, context, cluster_uuid): - """Return a cluster. - - :param context: The security context - :param cluster_uuid: The uuid of a cluster. - :returns: A cluster. - """ - - @abc.abstractmethod - def get_cluster_by_name(self, context, cluster_name): - """Return a cluster. - - :param context: The security context - :param cluster_name: The name of a cluster. - :returns: A cluster. - """ - - @abc.abstractmethod - def get_cluster_stats(self, context, project_id): - """Return clusters stats for the given project. 
- - :param context: The security context - :param project_id: The project id. - :returns: clusters, nodes count. - """ - - @abc.abstractmethod - def get_cluster_count_all(self, context, filters=None): - """Get count of matching clusters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - :returns: Count of matching clusters. - """ - - @abc.abstractmethod - def destroy_cluster(self, cluster_id): - """Destroy a cluster and all associated interfaces. - - :param cluster_id: The id or uuid of a cluster. - """ - - @abc.abstractmethod - def update_cluster(self, cluster_id, values): - """Update properties of a cluster. - - :param cluster_id: The id or uuid of a cluster. - :returns: A cluster. - :raises: ClusterNotFound - """ - - @abc.abstractmethod - def get_cluster_template_list(self, context, filters=None, - limit=None, marker=None, sort_key=None, - sort_dir=None): - """Get matching ClusterTemplates. - - Return a list of the specified columns for all ClusterTemplates that - match the specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - - :param limit: Maximum number of ClusterTemplates to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_cluster_template(self, values): - """Create a new ClusterTemplate. - - :param values: A dict containing several items used to identify - and track the ClusterTemplate, and several dicts which - are passed into the Drivers when managing this - ClusterTemplate. - For example: - - :: - - { - 'uuid': uuidutils.generate_uuid(), - 'name': 'example', - 'type': 'virt' - } - :returns: A ClusterTemplate. - """ - - @abc.abstractmethod - def get_cluster_template_by_id(self, context, cluster_template_id): - """Return a ClusterTemplate. - - :param context: The security context - :param cluster_template_id: The id of a ClusterTemplate. - :returns: A ClusterTemplate. - """ - - @abc.abstractmethod - def get_cluster_template_by_uuid(self, context, cluster_template_uuid): - """Return a ClusterTemplate. - - :param context: The security context - :param cluster_template_uuid: The uuid of a ClusterTemplate. - :returns: A ClusterTemplate. - """ - - @abc.abstractmethod - def get_cluster_template_by_name(self, context, cluster_template_name): - """Return a ClusterTemplate. - - :param context: The security context - :param cluster_template_name: The name of a ClusterTemplate. - :returns: A ClusterTemplate. - """ - - @abc.abstractmethod - def destroy_cluster_template(self, cluster_template_id): - """Destroy a ClusterTemplate and all associated interfaces. - - :param cluster_template_id: The id or uuid of a ClusterTemplate. - """ - - @abc.abstractmethod - def update_cluster_template(self, cluster_template_id, values): - """Update properties of a ClusterTemplate. - - :param cluster_template_id: The id or uuid of a ClusterTemplate. - :returns: A ClusterTemplate. - :raises: ClusterTemplateNotFound - """ - - @abc.abstractmethod - def create_x509keypair(self, values): - """Create a new x509keypair. - - :param values: A dict containing several items used to identify - and track the x509keypair, and several dicts which - are passed into the Drivers when managing this - x509keypair. 
For example: - - :: - - { - 'uuid': uuidutils.generate_uuid(), - 'certificate': 'AAA...', - 'private_key': 'BBB...', - 'private_key_passphrase': 'CCC...', - 'intermediates': 'DDD...', - } - :returns: A X509KeyPair. - """ - - @abc.abstractmethod - def get_x509keypair_by_id(self, context, x509keypair_id): - """Return a x509keypair. - - :param context: The security context - :param x509keypair_id: The id of a x509keypair. - :returns: A x509keypair. - """ - - @abc.abstractmethod - def get_x509keypair_by_uuid(self, context, x509keypair_uuid): - """Return a x509keypair. - - :param context: The security context - :param x509keypair_uuid: The uuid of a x509keypair. - :returns: A x509keypair. - """ - - @abc.abstractmethod - def destroy_x509keypair(self, x509keypair_id): - """Destroy a x509keypair. - - :param x509keypair_id: The id or uuid of a x509keypair. - """ - - @abc.abstractmethod - def update_x509keypair(self, x509keypair_id, values): - """Update properties of a X509KeyPair. - - :param x509keypair_id: The id or uuid of a X509KeyPair. - :returns: A X509KeyPair. - """ - - @abc.abstractmethod - def get_x509keypair_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None): - """Get matching x509keypairs. - - Return a list of the specified columns for all x509keypairs - that match the specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - - :param limit: Maximum number of x509keypairs to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def destroy_magnum_service(self, magnum_service_id): - """Destroys a magnum_service record. - - :param magnum_service_id: The id of a magnum_service. - """ - - @abc.abstractmethod - def update_magnum_service(self, magnum_service_id, values): - """Update properties of a magnum_service. - - :param magnum_service_id: The id of a magnum_service record. - """ - - @abc.abstractmethod - def get_magnum_service_by_host_and_binary(self, host, binary): - """Return a magnum_service record. - - :param host: The host where the binary is located. - :param binary: The name of the binary. - :returns: A magnum_service record. - """ - - @abc.abstractmethod - def create_magnum_service(self, values): - """Create a new magnum_service record. - - :param values: A dict containing several items used to identify - and define the magnum_service record. - :returns: A magnum_service record. - """ - - @abc.abstractmethod - def get_magnum_service_list(self, disabled=None, limit=None, - marker=None, sort_key=None, sort_dir=None): - """Get matching magnum_service records. - - Return a list of the specified columns for all magnum_services - that match the specified filters. - - :param disabled: Filters disabled services. Defaults to None. - :param limit: Maximum number of magnum_services to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_quota(self, values): - """Create a new Quota record for a resource in a project.
- - :param values: A dict containing several items used to identify - and track quota for a resource in a project. - - :: - - { - 'id': uuidutils.generate_uuid(), - 'project_id': 'fake_project', - 'resource': 'fake_resource', - 'hard_limit': 'fake_hardlimit', - } - - :returns: A quota record. - """ - - @abc.abstractmethod - def update_quota(self, project_id, values): - """Update quota record. - - :param project_id: The project id. - :param values: A dict containing several items used to identify - and track quota for a resource in a project. - - :: - - { - 'id': uuidutils.generate_uuid(), - 'project_id': 'fake_project', - 'resource': 'fake_resource', - 'hard_limit': 'fake_hardlimit', - } - :returns: A quota record. - """ - - @abc.abstractmethod - def delete_quota(self, project_id, resource): - """Delete a quota. - - :param project_id: Project id. - :param resource: resource name. - """ - - @abc.abstractmethod - def get_quota_by_id(self, context, quota_id): - """Return a quota. - - :param context: The security context - :param quota_id: The id of a quota. - :returns: A quota. - """ - - @abc.abstractmethod - def get_quota_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None): - """Get quota list. - - Return a list of the specified columns for all quotas that match the - specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - - :param limit: Maximum number of quotas to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def quota_get_all_by_project_id(self, project_id): - """Gets Quota record for all the resources in a project. - - :param project_id: Project identifier of the project. - - :returns: Quota record for all resources in a project. - """ - - @abc.abstractmethod - def get_quota_by_project_id_resource(self, project_id, resource): - """Gets the quota record for the given project id and resource. - - :param project_id: project id. - :param resource: resource name. - - :returns: Quota record. - """ diff --git a/magnum/db/migration.py b/magnum/db/migration.py deleted file mode 100644 index 2a93f1c5..00000000 --- a/magnum/db/migration.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
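Note: all access to the abstract Connection interface above goes through get_instance(), which hands back the backend that oslo.db loaded lazily from _BACKEND_MAPPING, so callers never import the sqlalchemy driver directly. A sketch of a hypothetical caller (ctx stands in for a real request context; nothing here beyond the method names and keyword arguments comes from the diff):

```python
# Hypothetical caller of the deleted magnum.db.api facade. The method name
# and keywords match the abstract Connection above; ctx would be a real
# magnum.common.context.RequestContext in practice.
from magnum.db import api as db_api

ctx = None  # placeholder for a real security context
dbapi = db_api.get_instance()
clusters = dbapi.get_cluster_list(
    ctx,
    filters={'project_id': 'fake_project'},
    limit=10,
    sort_key='created_at',
    sort_dir='desc',
)
```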
- -"""Database setup and migration commands.""" - -from stevedore import driver - -import magnum.conf - -CONF = magnum.conf.CONF -_IMPL = None - - -def get_backend(): - global _IMPL - if not _IMPL: - _IMPL = driver.DriverManager("magnum.database.migration_backend", - CONF.database.backend).driver - return _IMPL - - -def upgrade(version=None): - """Migrate the database to `version` or the most recent version.""" - return get_backend().upgrade(version) - - -def version(): - return get_backend().version() - - -def stamp(version): - return get_backend().stamp(version) - - -def revision(message, autogenerate): - return get_backend().revision(message, autogenerate) diff --git a/magnum/db/sqlalchemy/__init__.py b/magnum/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/db/sqlalchemy/alembic.ini b/magnum/db/sqlalchemy/alembic.ini deleted file mode 100644 index a7689803..00000000 --- a/magnum/db/sqlalchemy/alembic.ini +++ /dev/null @@ -1,54 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = %(here)s/alembic - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# max length of characters to apply to the -# "slug" field -#truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -#sqlalchemy.url = driver://user:pass@localhost/dbname - - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/magnum/db/sqlalchemy/alembic/README b/magnum/db/sqlalchemy/alembic/README deleted file mode 100644 index c9526a8b..00000000 --- a/magnum/db/sqlalchemy/alembic/README +++ /dev/null @@ -1,11 +0,0 @@ -Please see https://alembic.readthedocs.org/en/latest/index.html for general documentation - -To create alembic migrations use: -$ magnum-db-manage revision --message "description of revision" --autogenerate - -Stamp db with most recent migration version, without actually running migrations -$ magnum-db-manage stamp head - -Upgrade can be performed by: -$ magnum-db-manage upgrade -$ magnum-db-manage upgrade head diff --git a/magnum/db/sqlalchemy/alembic/env.py b/magnum/db/sqlalchemy/alembic/env.py deleted file mode 100644 index ff264b76..00000000 --- a/magnum/db/sqlalchemy/alembic/env.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from logging import config as log_config - -from alembic import context - -from magnum.db.sqlalchemy import api as sqla_api -from magnum.db.sqlalchemy import models - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -log_config.fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -target_metadata = models.Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - engine = sqla_api.get_engine() - with engine.connect() as connection: - context.configure(connection=connection, - target_metadata=target_metadata) - with context.begin_transaction(): - context.run_migrations() - - -run_migrations_online() diff --git a/magnum/db/sqlalchemy/alembic/script.py.mako b/magnum/db/sqlalchemy/alembic/script.py.mako deleted file mode 100644 index 3b1c960c..00000000 --- a/magnum/db/sqlalchemy/alembic/script.py.mako +++ /dev/null @@ -1,18 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision} -Create Date: ${create_date} - -""" - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -def upgrade(): - ${upgrades if upgrades else "pass"} diff --git a/magnum/db/sqlalchemy/alembic/versions/049f81f6f584_remove_ssh_authorized_key_from_baymodel.py b/magnum/db/sqlalchemy/alembic/versions/049f81f6f584_remove_ssh_authorized_key_from_baymodel.py deleted file mode 100644 index 793b303e..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/049f81f6f584_remove_ssh_authorized_key_from_baymodel.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2016 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""remove_ssh_authorized_key_from_baymodel - -Revision ID: 049f81f6f584 -Revises: ee92b41b8809 -Create Date: 2016-02-28 15:27:26.211244 - -""" - -# revision identifiers, used by Alembic. -revision = '049f81f6f584' -down_revision = 'ee92b41b8809' - -from alembic import op - - -def upgrade(): - op.drop_column('baymodel', 'ssh_authorized_key') diff --git a/magnum/db/sqlalchemy/alembic/versions/05d3e97de9ee_add_volume_driver.py b/magnum/db/sqlalchemy/alembic/versions/05d3e97de9ee_add_volume_driver.py deleted file mode 100644 index 62c6f0f3..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/05d3e97de9ee_add_volume_driver.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
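Note: the env.py removed above wires up online migrations only, since sqla_api.get_engine() always supplies a live engine. For contrast, the stock Alembic template also defines an offline mode; a conventional sketch of that counterpart, which was never part of the deleted file:

```python
# Conventional Alembic offline mode, shown for comparison only -- NOT part
# of the deleted env.py. target_metadata would be models.Base.metadata,
# exactly as in the online path above.
from alembic import context


def run_migrations_offline(target_metadata):
    """Emit the migration SQL to stdout instead of running it on an engine."""
    url = context.config.get_main_option('sqlalchemy.url')
    context.configure(url=url, target_metadata=target_metadata,
                      literal_binds=True)
    with context.begin_transaction():
        context.run_migrations()
```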
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add volume driver - -Revision ID: 05d3e97de9ee -Revises: 57fbdf2327a2 -Create Date: 2016-01-12 06:21:24.880838 - -""" - -# revision identifiers, used by Alembic. -revision = '05d3e97de9ee' -down_revision = '57fbdf2327a2' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('volume_driver', - sa.String(length=255), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/085e601a39f6_remove_service.py b/magnum/db/sqlalchemy/alembic/versions/085e601a39f6_remove_service.py deleted file mode 100644 index fee6c0c2..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/085e601a39f6_remove_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""remove service object - -Revision ID: 085e601a39f6 -Revises: a1136d335540 -Create Date: 2016-05-25 12:05:30.790282 - -""" - -# revision identifiers, used by Alembic. -revision = '085e601a39f6' -down_revision = 'a1136d335540' - -from alembic import op - - -def upgrade(): - op.drop_table('service') diff --git a/magnum/db/sqlalchemy/alembic/versions/14328d6a57e3_add_master_count_to_bay.py b/magnum/db/sqlalchemy/alembic/versions/14328d6a57e3_add_master_count_to_bay.py deleted file mode 100644 index d2edb4bb..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/14328d6a57e3_add_master_count_to_bay.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add master count to bay - -Revision ID: 14328d6a57e3 -Revises: 53882537ac57 -Create Date: 2015-07-29 16:00:38.721016 - -""" - -# revision identifiers, used by Alembic. 
-revision = '14328d6a57e3' -down_revision = '53882537ac57' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('bay', - sa.Column('master_count', sa.Integer(), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/1481f5b560dd_add_labels_column_to_baymodel_table.py b/magnum/db/sqlalchemy/alembic/versions/1481f5b560dd_add_labels_column_to_baymodel_table.py deleted file mode 100644 index 681393d7..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/1481f5b560dd_add_labels_column_to_baymodel_table.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add labels column to baymodel table - -Revision ID: 1481f5b560dd -Revises: 3be65537a94a -Create Date: 2015-09-02 22:34:07.590142 - -""" - -# revision identifiers, used by Alembic. -revision = '1481f5b560dd' -down_revision = '3be65537a94a' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('labels', - sa.Text(), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/156ceb17fb0a_add_bay_status_reason.py b/magnum/db/sqlalchemy/alembic/versions/156ceb17fb0a_add_bay_status_reason.py deleted file mode 100644 index 75310cb4..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/156ceb17fb0a_add_bay_status_reason.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add_bay_status_reason - -Revision ID: 156ceb17fb0a -Revises: 59e7664a8ba1 -Create Date: 2015-05-30 11:34:57.847071 - -""" - -# revision identifiers, used by Alembic. -revision = '156ceb17fb0a' -down_revision = '59e7664a8ba1' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('bay', - sa.Column('status_reason', sa.Text, nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/1afee1db6cd0_add_master_flavor.py b/magnum/db/sqlalchemy/alembic/versions/1afee1db6cd0_add_master_flavor.py deleted file mode 100644 index a521f0ae..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/1afee1db6cd0_add_master_flavor.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -"""Add master flavor - -Revision ID: 1afee1db6cd0 -Revises: 35cff7c86221 -Create Date: 2015-02-27 14:53:38.042900 - -""" - -# revision identifiers, used by Alembic. -revision = '1afee1db6cd0' -down_revision = '35cff7c86221' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('master_flavor_id', - sa.String(length=255), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/1c1ff5e56048_rename_container_image_id.py b/magnum/db/sqlalchemy/alembic/versions/1c1ff5e56048_rename_container_image_id.py deleted file mode 100644 index d7113afc..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/1c1ff5e56048_rename_container_image_id.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""rename_container_image_id - -Revision ID: 1c1ff5e56048 -Revises: 156ceb17fb0a -Create Date: 2015-06-18 10:21:40.991734 - -""" - -# revision identifiers, used by Alembic. -revision = '1c1ff5e56048' -down_revision = '156ceb17fb0a' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.alter_column('container', 'image_id', - new_column_name='image', - existing_type=sa.String(255)) diff --git a/magnum/db/sqlalchemy/alembic/versions/1d045384b966_add_insecure_baymodel_attr.py b/magnum/db/sqlalchemy/alembic/versions/1d045384b966_add_insecure_baymodel_attr.py deleted file mode 100644 index c971bfac..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/1d045384b966_add_insecure_baymodel_attr.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add-insecure-baymodel-attr - -Revision ID: 1d045384b966 -Revises: 1481f5b560dd -Create Date: 2015-09-23 18:17:10.195121 - -""" - -# revision identifiers, used by Alembic.
-revision = '1d045384b966' -down_revision = '1481f5b560dd' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - insecure_column = sa.Column('insecure', sa.Boolean(), default=False) - op.add_column('baymodel', insecure_column) - baymodel = sa.sql.table('baymodel', insecure_column) - op.execute( - baymodel.update().values({'insecure': True}) - ) diff --git a/magnum/db/sqlalchemy/alembic/versions/1f196a3dabae_remove_container.py b/magnum/db/sqlalchemy/alembic/versions/1f196a3dabae_remove_container.py deleted file mode 100644 index 15413814..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/1f196a3dabae_remove_container.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""remove container object - -Revision ID: 1f196a3dabae -Revises: e0653b2d5271 -Create Date: 2016-06-02 11:42:42.200992 - -""" - -# revision identifiers, used by Alembic. -revision = '1f196a3dabae' -down_revision = 'e0653b2d5271' - -from alembic import op - - -def upgrade(): - op.drop_table('container') diff --git a/magnum/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py b/magnum/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py deleted file mode 100644 index d71e2092..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py +++ /dev/null @@ -1,137 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""initial migration - -Revision ID: 2581ebaf0cb2 -Revises: None -Create Date: 2014-01-17 12:14:07.754448 - -""" - -# revision identifiers, used by Alembic. -revision = '2581ebaf0cb2' -down_revision = None - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - # commands auto generated by Alembic - please adjust! 
- op.create_table( - 'bay', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=255), nullable=True), - sa.Column('baymodel_id', sa.String(length=255), nullable=True), - sa.Column('node_count', sa.Integer(), nullable=True), - sa.Column('master_address', sa.String(length=255), nullable=True), - sa.Column('minions_address', sa.Text(), nullable=True), - sa.Column('stack_id', sa.String(length=255), nullable=True), - sa.PrimaryKeyConstraint('id'), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - op.create_table( - 'baymodel', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=255), nullable=True), - sa.Column('flavor_id', sa.String(length=255), nullable=True), - sa.Column('keypair_id', sa.String(length=255), nullable=True), - sa.Column('image_id', sa.String(length=255), nullable=True), - sa.Column('external_network_id', sa.String(length=255), nullable=True), - sa.Column('dns_nameserver', sa.String(length=255), nullable=True), - sa.Column('apiserver_port', sa.Integer(), nullable=True), - sa.PrimaryKeyConstraint('id'), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - op.create_table( - 'container', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=255), nullable=True), - sa.Column('image_id', sa.String(length=255), nullable=True), - sa.PrimaryKeyConstraint('id'), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - op.create_table( - 'node', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('type', sa.String(length=20), nullable=True), - sa.Column('image_id', sa.String(length=255), nullable=True), - sa.Column('ironic_node_id', sa.String(length=36), nullable=True), - sa.PrimaryKeyConstraint('id'), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - op.create_table( - 'pod', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=255), nullable=True), - sa.Column('desc', sa.String(length=255), nullable=True), - sa.Column('bay_uuid', sa.String(length=36), nullable=True), - sa.Column('images', sa.Text(), nullable=False), - sa.Column('labels', sa.Text(), nullable=True), - sa.Column('status', sa.String(length=255), nullable=False), - sa.PrimaryKeyConstraint('id'), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - op.create_table( - 'service', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=255), nullable=True), - sa.Column('bay_uuid', 
sa.String(length=36), nullable=True), - sa.Column('labels', sa.Text, nullable=True), - sa.Column('selector', sa.Text, nullable=True), - sa.Column('ip', sa.String(length=36), nullable=True), - sa.Column('port', sa.Integer(), nullable=True), - sa.PrimaryKeyConstraint('id'), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - op.create_table( - 'replicationcontroller', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=255), nullable=True), - sa.Column('bay_uuid', sa.String(length=36), nullable=True), - sa.Column('images', sa.Text(), nullable=False), - sa.Column('labels', sa.Text(), nullable=True), - sa.Column('replicas', sa.Integer(), nullable=True), - sa.PrimaryKeyConstraint('id'), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - # end Alembic commands diff --git a/magnum/db/sqlalchemy/alembic/versions/27ad304554e2_adding_magnum_service_functionality.py b/magnum/db/sqlalchemy/alembic/versions/27ad304554e2_adding_magnum_service_functionality.py deleted file mode 100644 index 401d454e..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/27ad304554e2_adding_magnum_service_functionality.py +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""adding magnum_service functionality - -Revision ID: 27ad304554e2 -Revises: 1d045384b966 -Create Date: 2015-09-01 18:27:14.371860 - -""" - -# revision identifiers, used by Alembic. -revision = '27ad304554e2' -down_revision = '1d045384b966' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'magnum_service', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('report_count', sa.Integer(), nullable=False), - sa.Column('host', sa.String(length=255), nullable=True), - sa.Column('binary', sa.String(length=255), nullable=True), - sa.Column('disabled', sa.Boolean(), nullable=True), - sa.Column('disabled_reason', sa.String(length=255), nullable=True), - # 'last_seen_up' has different purpose than 'updated_at'. - # 'updated_at' refers to any modification of the entry, which can - # be administrative too, whereas 'last_seen_up' is more related to - # magnum_service. 
Modeled after nova/servicegroup - sa.Column('last_seen_up', sa.DateTime(), nullable=True), - sa.Column('forced_down', sa.Boolean(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('host', 'binary', - name='uniq_magnum_service0host0binary') - ) diff --git a/magnum/db/sqlalchemy/alembic/versions/29affeaa2bc2_rename_bay_master_address.py b/magnum/db/sqlalchemy/alembic/versions/29affeaa2bc2_rename_bay_master_address.py deleted file mode 100644 index 219c37e7..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/29affeaa2bc2_rename_bay_master_address.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""rename-bay-master-address - -Revision ID: 29affeaa2bc2 -Revises: 2d1354bbf76e -Create Date: 2015-03-25 16:06:08.148629 - -""" - -# revision identifiers, used by Alembic. -revision = '29affeaa2bc2' -down_revision = '2d1354bbf76e' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.alter_column('bay', 'master_address', - new_column_name='api_address', - existing_type=sa.String(255)) diff --git a/magnum/db/sqlalchemy/alembic/versions/2ace4006498_rename_bay_minions_address.py b/magnum/db/sqlalchemy/alembic/versions/2ace4006498_rename_bay_minions_address.py deleted file mode 100644 index c9d1fefa..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/2ace4006498_rename_bay_minions_address.py +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""rename-bay-minions-address - -Revision ID: 2ace4006498 -Revises: 29affeaa2bc2 -Create Date: 2015-03-27 15:15:36.309601 - -""" - -# revision identifiers, used by Alembic. -revision = '2ace4006498' -down_revision = '29affeaa2bc2' - -from alembic import op - -from magnum.db.sqlalchemy import models - - -def upgrade(): - op.alter_column('bay', 'minions_address', - new_column_name='node_addresses', - existing_type=models.JSONEncodedList()) diff --git a/magnum/db/sqlalchemy/alembic/versions/2ae93c9c6191_add_public_column_to_baymodel_table.py b/magnum/db/sqlalchemy/alembic/versions/2ae93c9c6191_add_public_column_to_baymodel_table.py deleted file mode 100644 index 1de4ade2..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/2ae93c9c6191_add_public_column_to_baymodel_table.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2015 Rackspace US, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add public column to baymodel table - -Revision ID: 2ae93c9c6191 -Revises: 5ad410481b88 -Create Date: 2015-09-30 15:33:44.514290 - -""" - -# revision identifiers, used by Alembic. -revision = '2ae93c9c6191' -down_revision = '5ad410481b88' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('public', sa.Boolean(), default=False)) diff --git a/magnum/db/sqlalchemy/alembic/versions/2b5f24dd95de_rename_service_port.py b/magnum/db/sqlalchemy/alembic/versions/2b5f24dd95de_rename_service_port.py deleted file mode 100644 index a43d8258..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/2b5f24dd95de_rename_service_port.py +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""rename service port - -Revision ID: 2b5f24dd95de -Revises: 3b6c4c42adb4 -Create Date: 2015-04-29 05:52:52.204095 - -""" - -# revision identifiers, used by Alembic. -revision = '2b5f24dd95de' -down_revision = '3b6c4c42adb4' - -from alembic import op - -from magnum.db.sqlalchemy import models - - -def upgrade(): - op.alter_column('service', 'port', - new_column_name='ports', - existing_type=models.JSONEncodedList()) diff --git a/magnum/db/sqlalchemy/alembic/versions/2d1354bbf76e_ssh_authorized_key.py b/magnum/db/sqlalchemy/alembic/versions/2d1354bbf76e_ssh_authorized_key.py deleted file mode 100644 index c137d0ec..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/2d1354bbf76e_ssh_authorized_key.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""ssh authorized key - -Revision ID: 2d1354bbf76e -Revises: 1afee1db6cd0 -Create Date: 2015-03-13 14:05:58.744652 - -""" - -# revision identifiers, used by Alembic.
-revision = '2d1354bbf76e' -down_revision = '1afee1db6cd0' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('ssh_authorized_key', - sa.Text, nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/2d8657c0cdc_add_bay_uuid.py b/magnum/db/sqlalchemy/alembic/versions/2d8657c0cdc_add_bay_uuid.py deleted file mode 100644 index 0fd35d73..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/2d8657c0cdc_add_bay_uuid.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add bay uuid - -Revision ID: 2d8657c0cdc -Revises: e772b2598d9 -Create Date: 2015-04-22 16:59:06.799384 - -""" - -# revision identifiers, used by Alembic. -revision = '2d8657c0cdc' -down_revision = 'e772b2598d9' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('container', sa.Column('bay_uuid', - sa.String(length=255), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/33ef79969018_add_memory_to_container.py b/magnum/db/sqlalchemy/alembic/versions/33ef79969018_add_memory_to_container.py deleted file mode 100644 index e955f7ff..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/33ef79969018_add_memory_to_container.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Add memory to container - -Revision ID: 33ef79969018 -Revises: 2ae93c9c6191 -Create Date: 2015-10-03 17:03:47.194253 - -""" - -# revision identifiers, used by Alembic. -revision = '33ef79969018' -down_revision = '2ae93c9c6191' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('container', - sa.Column('memory', sa.String(length=255), - nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/35cff7c86221_add_private_network_to_baymodel.py b/magnum/db/sqlalchemy/alembic/versions/35cff7c86221_add_private_network_to_baymodel.py deleted file mode 100644 index 2cd4551c..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/35cff7c86221_add_private_network_to_baymodel.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add private network to baymodel - -Revision ID: 35cff7c86221 -Revises: 3a938526b35d -Create Date: 2015-02-26 05:02:34.260099 - -""" - -# revision identifiers, used by Alembic. -revision = '35cff7c86221' -down_revision = '3a938526b35d' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('fixed_network', sa.String(length=255), - nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/3a938526b35d_add_docker_volume_size.py b/magnum/db/sqlalchemy/alembic/versions/3a938526b35d_add_docker_volume_size.py deleted file mode 100644 index 2f6837e8..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/3a938526b35d_add_docker_volume_size.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Add docker volume size column - -Revision ID: 3a938526b35d -Revises: 5793cd26898d -Create Date: 2015-02-23 14:32:00.086650 - -""" - -# revision identifiers, used by Alembic. -revision = '3a938526b35d' -down_revision = '5793cd26898d' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('docker_volume_size', sa.Integer(), - nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/3b6c4c42adb4_add_unique_constraints.py b/magnum/db/sqlalchemy/alembic/versions/3b6c4c42adb4_add_unique_constraints.py deleted file mode 100644 index 7be0f370..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/3b6c4c42adb4_add_unique_constraints.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Add unique constraints - -Revision ID: 3b6c4c42adb4 -Revises: 592131657ca1 -Create Date: 2015-05-05 09:45:44.657047 - -""" - -# revision identifiers, used by Alembic. 
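# NOTE: like the other files in this tree, the migrations above define
# only upgrade(). If a downgrade were ever needed, the inverse of a plain
# add_column is a drop_column -- a hypothetical sketch for 3a938526b35d,
# not part of the original file:
from alembic import op

def downgrade():
    op.drop_column('baymodel', 'docker_volume_size')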
-revision = '3b6c4c42adb4' -down_revision = '592131657ca1' - -from alembic import op - - -def upgrade(): - op.create_unique_constraint("uniq_bay0uuid", "bay", ["uuid"]) - op.create_unique_constraint("uniq_baylock0bay_uuid", "baylock", - ["bay_uuid"]) - op.create_unique_constraint("uniq_baymodel0uuid", "baymodel", ["uuid"]) - op.create_unique_constraint("uniq_container0uuid", "container", ["uuid"]) - op.create_unique_constraint("uniq_node0uuid", "node", ["uuid"]) - op.create_unique_constraint("uniq_node0ironic_node_id", "node", - ["ironic_node_id"]) - op.create_unique_constraint("uniq_pod0uuid", "pod", ["uuid"]) - op.create_unique_constraint("uniq_service0uuid", "service", ["uuid"]) - op.create_unique_constraint("uniq_replicationcontroller0uuid", - "replicationcontroller", ["uuid"]) diff --git a/magnum/db/sqlalchemy/alembic/versions/3be65537a94a_add_network_driver_baymodel_column.py b/magnum/db/sqlalchemy/alembic/versions/3be65537a94a_add_network_driver_baymodel_column.py deleted file mode 100644 index df8d293e..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/3be65537a94a_add_network_driver_baymodel_column.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add_network_driver_baymodel_column - -Revision ID: 3be65537a94a -Revises: 4e263f236334 -Create Date: 2015-09-03 20:51:54.229436 - -""" - -# revision identifiers, used by Alembic. -revision = '3be65537a94a' -down_revision = '4e263f236334' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('network_driver', - sa.String(length=255), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/3bea56f25597_multi_tenant.py b/magnum/db/sqlalchemy/alembic/versions/3bea56f25597_multi_tenant.py deleted file mode 100644 index 754aa2ec..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/3bea56f25597_multi_tenant.py +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Multi Tenant Support - -Revision ID: 3bea56f25597 -Revises: 2581ebaf0cb2 -Create Date: 2015-01-22 22:22:22.150632 - -""" - -# revision identifiers, used by Alembic. 
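# NOTE: the "uniq_<table>0<column>" names used by 3b6c4c42adb4 above
# follow the oslo.db naming convention for unique constraints; naming
# them explicitly is what makes the inverse operation expressible. A
# hypothetical sketch (no downgrade() exists in the original):
from alembic import op

def downgrade():
    op.drop_constraint('uniq_bay0uuid', 'bay', type_='unique')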
-revision = '3bea56f25597' -down_revision = '2581ebaf0cb2' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('bay', sa.Column('project_id', sa.String(length=255), - nullable=True)) - op.add_column('bay', sa.Column('user_id', sa.String(length=255), - nullable=True)) - op.add_column('baymodel', sa.Column('project_id', sa.String(length=255), - nullable=True)) - op.add_column('baymodel', sa.Column('user_id', sa.String(length=255), - nullable=True)) - op.add_column('container', sa.Column('project_id', sa.String(length=255), - nullable=True)) - op.add_column('container', sa.Column('user_id', sa.String(length=255), - nullable=True)) - op.add_column('node', sa.Column('project_id', sa.String(length=255), - nullable=True)) - op.add_column('node', sa.Column('user_id', sa.String(length=255), - nullable=True)) - op.add_column('pod', sa.Column('project_id', sa.String(length=255), - nullable=True)) - op.add_column('pod', sa.Column('user_id', sa.String(length=255), - nullable=True)) - op.add_column('service', sa.Column('project_id', sa.String(length=255), - nullable=True)) - op.add_column('service', sa.Column('user_id', sa.String(length=255), - nullable=True)) - op.add_column('replicationcontroller', sa.Column('project_id', - sa.String(length=255), nullable=True)) - op.add_column('replicationcontroller', sa.Column('user_id', - sa.String(length=255), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/40f325033343_add_bay_create_timeout_to_bay.py b/magnum/db/sqlalchemy/alembic/versions/40f325033343_add_bay_create_timeout_to_bay.py deleted file mode 100644 index ba080360..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/40f325033343_add_bay_create_timeout_to_bay.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add bay_create_timeout to bay - -Revision ID: 40f325033343 -Revises: 5977879072a7 -Create Date: 2015-12-02 16:38:54.697413 - -""" - -# revision identifiers, used by Alembic. -revision = '40f325033343' -down_revision = '5977879072a7' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('bay', sa.Column('bay_create_timeout', - sa.Integer(), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/417917e778f5_add_server_type_to_baymodel.py b/magnum/db/sqlalchemy/alembic/versions/417917e778f5_add_server_type_to_baymodel.py deleted file mode 100644 index 38ab3878..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/417917e778f5_add_server_type_to_baymodel.py +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Add server_type column to baymodel - -Revision ID: 417917e778f5 -Revises: 33ef79969018 -Create Date: 2015-10-14 16:21:57.229436 - -""" - -# revision identifiers, used by Alembic. -revision = '417917e778f5' -down_revision = '33ef79969018' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('server_type', - sa.String(length=255), nullable=True, - server_default='vm')) diff --git a/magnum/db/sqlalchemy/alembic/versions/421102d1f2d2_create_x509keypair_table.py b/magnum/db/sqlalchemy/alembic/versions/421102d1f2d2_create_x509keypair_table.py deleted file mode 100644 index e21e9367..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/421102d1f2d2_create_x509keypair_table.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""create x509keypair table - -Revision ID: 421102d1f2d2 -Revises: 14328d6a57e3 -Create Date: 2015-07-17 13:12:12.653241 - -""" - -# revision identifiers, used by Alembic. -revision = '421102d1f2d2' -down_revision = '14328d6a57e3' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'x509keypair', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('name', sa.String(length=255), nullable=True), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('bay_uuid', sa.String(length=36), nullable=True), - sa.Column('ca_cert', sa.Text()), - sa.Column('certificate', sa.Text()), - sa.Column('private_key', sa.Text()), - sa.Column('project_id', sa.String(length=255), nullable=True), - sa.Column('user_id', sa.String(length=255), nullable=True), - sa.PrimaryKeyConstraint('id'), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - op.create_unique_constraint("uniq_x509keypair0uuid", - "x509keypair", ["uuid"]) diff --git a/magnum/db/sqlalchemy/alembic/versions/456126c6c9e9_create_baylock_table.py b/magnum/db/sqlalchemy/alembic/versions/456126c6c9e9_create_baylock_table.py deleted file mode 100644 index 37c26a50..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/456126c6c9e9_create_baylock_table.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""create baylock table - -Revision ID: 456126c6c9e9 -Revises: 2ace4006498 -Create Date: 2015-04-01 15:04:45.652672 - -""" - -# revision identifiers, used by Alembic. -revision = '456126c6c9e9' -down_revision = '2ace4006498' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'baylock', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('bay_uuid', sa.String(length=36), nullable=True), - sa.Column('conductor_id', sa.String(length=64), nullable=True), - sa.PrimaryKeyConstraint('id'), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) diff --git a/magnum/db/sqlalchemy/alembic/versions/4956f03cabad_add_cluster_distro.py b/magnum/db/sqlalchemy/alembic/versions/4956f03cabad_add_cluster_distro.py deleted file mode 100644 index 01891f02..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/4956f03cabad_add_cluster_distro.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add cluster distro - -Revision ID: 4956f03cabad -Revises: 2d8657c0cdc -Create Date: 2015-04-25 02:17:51.486547 - -""" - -# revision identifiers, used by Alembic. -revision = '4956f03cabad' -down_revision = '2d8657c0cdc' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('cluster_distro', - sa.String(length=255), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/4e263f236334_add_registry_enabled.py b/magnum/db/sqlalchemy/alembic/versions/4e263f236334_add_registry_enabled.py deleted file mode 100644 index a3c508cb..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/4e263f236334_add_registry_enabled.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Add registry_enabled - -Revision ID: 4e263f236334 -Revises: 5518af8dbc21 -Create Date: 2015-09-14 18:39:25.871218 - -""" - -# revision identifiers, used by Alembic. 
-revision = '4e263f236334' -down_revision = '5518af8dbc21' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('registry_enabled', - sa.Boolean(), default=False)) diff --git a/magnum/db/sqlalchemy/alembic/versions/4ea34a59a64c_add_discovery_url_to_bay.py b/magnum/db/sqlalchemy/alembic/versions/4ea34a59a64c_add_discovery_url_to_bay.py deleted file mode 100644 index bf8738d4..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/4ea34a59a64c_add_discovery_url_to_bay.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add-discovery-url-to-bay - -Revision ID: 4ea34a59a64c -Revises: 456126c6c9e9 -Create Date: 2015-04-14 18:56:03.440329 - -""" - -# revision identifiers, used by Alembic. -revision = '4ea34a59a64c' -down_revision = '456126c6c9e9' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('bay', - sa.Column('discovery_url', sa.String(length=255), - nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/53882537ac57_add_host_column_to_pod.py b/magnum/db/sqlalchemy/alembic/versions/53882537ac57_add_host_column_to_pod.py deleted file mode 100644 index 8056a3f2..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/53882537ac57_add_host_column_to_pod.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add host column to pod - -Revision ID: 53882537ac57 -Revises: 1c1ff5e56048 -Create Date: 2015-06-25 16:52:47.159887 - -""" - -# revision identifiers, used by Alembic. -revision = '53882537ac57' -down_revision = '1c1ff5e56048' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('pod', - sa.Column('host', sa.Text, nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/5518af8dbc21_rename_cert_uuid.py b/magnum/db/sqlalchemy/alembic/versions/5518af8dbc21_rename_cert_uuid.py deleted file mode 100644 index b0285596..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/5518af8dbc21_rename_cert_uuid.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Rename cert_uuid - -Revision ID: 5518af8dbc21 -Revises: 6f21dc920bb -Create Date: 2015-08-28 13:13:19.747625 - -""" - -# revision identifiers, used by Alembic. -revision = '5518af8dbc21' -down_revision = '6f21dc920bb' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.alter_column('bay', 'ca_cert_uuid', - new_column_name='ca_cert_ref', - existing_type=sa.String(length=36), - type_=sa.String(length=512), - nullable=True) - op.alter_column('bay', 'magnum_cert_uuid', - new_column_name='magnum_cert_ref', - existing_type=sa.String(length=36), - type_=sa.String(length=512), - nullable=True) diff --git a/magnum/db/sqlalchemy/alembic/versions/5793cd26898d_add_bay_status.py b/magnum/db/sqlalchemy/alembic/versions/5793cd26898d_add_bay_status.py deleted file mode 100644 index 59f0df45..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/5793cd26898d_add_bay_status.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Add bay status - -Revision ID: 5793cd26898d -Revises: 3bea56f25597 -Create Date: 2015-02-09 12:54:09.449948 - -""" - -# revision identifiers, used by Alembic. -revision = '5793cd26898d' -down_revision = '3bea56f25597' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('bay', sa.Column('status', sa.String(length=20), - nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/57fbdf2327a2_remove_baylock.py b/magnum/db/sqlalchemy/alembic/versions/57fbdf2327a2_remove_baylock.py deleted file mode 100644 index 492e52dc..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/57fbdf2327a2_remove_baylock.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""remove baylock - -Revision ID: 57fbdf2327a2 -Revises: adc3b7679ae -Create Date: 2015-12-17 09:27:18.429773 - -""" - -# revision identifiers, used by Alembic. 
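# NOTE: 5518af8dbc21 above renames and widens each column in a single
# alter_column(); existing_type is required so dialects such as MySQL can
# render a complete ALTER statement. Reversing it would shrink String(512)
# back to String(36) and could truncate data -- a hypothetical sketch for
# one column, not part of the original:
from alembic import op
import sqlalchemy as sa

def downgrade():
    op.alter_column('bay', 'ca_cert_ref',
                    new_column_name='ca_cert_uuid',
                    existing_type=sa.String(length=512),
                    type_=sa.String(length=36))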
-revision = '57fbdf2327a2' -down_revision = 'adc3b7679ae' - -from alembic import op - - -def upgrade(): - op.drop_table('baylock') diff --git a/magnum/db/sqlalchemy/alembic/versions/592131657ca1_add_coe_column_to_baymodel.py b/magnum/db/sqlalchemy/alembic/versions/592131657ca1_add_coe_column_to_baymodel.py deleted file mode 100644 index 5fa5159c..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/592131657ca1_add_coe_column_to_baymodel.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Add coe column to BayModel - -Revision ID: 592131657ca1 -Revises: 4956f03cabad -Create Date: 2015-04-17 14:20:17.620995 - -""" - -# revision identifiers, used by Alembic. -revision = '592131657ca1' -down_revision = '4956f03cabad' - -from alembic import op - -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('coe', sa.String(length=255), - nullable=True)) - - baymodel = sa.sql.table('baymodel', - sa.sql.column('coe', sa.String(length=255))) - - op.execute( - baymodel.update().values({ - 'coe': op.inline_literal("kubernetes")}) - ) diff --git a/magnum/db/sqlalchemy/alembic/versions/5977879072a7_add_env_to_container.py b/magnum/db/sqlalchemy/alembic/versions/5977879072a7_add_env_to_container.py deleted file mode 100644 index 505f366a..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/5977879072a7_add_env_to_container.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add-env-to-container - -Revision ID: 5977879072a7 -Revises: 417917e778f5 -Create Date: 2015-11-26 04:10:39.462966 - -""" - -# revision identifiers, used by Alembic. -revision = '5977879072a7' -down_revision = '417917e778f5' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('container', sa.Column('environment', - sa.Text(), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/59e7664a8ba1_add_container_status.py b/magnum/db/sqlalchemy/alembic/versions/59e7664a8ba1_add_container_status.py deleted file mode 100644 index 335bda68..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/59e7664a8ba1_add_container_status.py +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add_container_status - -Revision ID: 59e7664a8ba1 -Revises: 2b5f24dd95de -Create Date: 2015-05-11 11:33:23.125790 - -""" - -# revision identifiers, used by Alembic. -revision = '59e7664a8ba1' -down_revision = '2b5f24dd95de' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('container', - sa.Column('status', sa.String(length=20), - nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/5ad410481b88_rename_insecure.py b/magnum/db/sqlalchemy/alembic/versions/5ad410481b88_rename_insecure.py deleted file mode 100644 index 2184c26c..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/5ad410481b88_rename_insecure.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""rename-insecure - -Revision ID: 5ad410481b88 -Revises: 27ad304554e2 -Create Date: 2015-09-29 17:51:10.195121 - -""" - -# revision identifiers, used by Alembic. -revision = '5ad410481b88' -down_revision = '27ad304554e2' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.alter_column('baymodel', 'insecure', - new_column_name='tls_disabled', - existing_type=sa.Boolean()) diff --git a/magnum/db/sqlalchemy/alembic/versions/5d4caa6e0a42_create_trustee_for_each_bay.py b/magnum/db/sqlalchemy/alembic/versions/5d4caa6e0a42_create_trustee_for_each_bay.py deleted file mode 100644 index 198fe5bc..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/5d4caa6e0a42_create_trustee_for_each_bay.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""create trustee for each bay - -Revision ID: 5d4caa6e0a42 -Revises: bb42b7cad130 -Create Date: 2016-02-17 14:16:12.927874 - -""" - -# revision identifiers, used by Alembic. 
-revision = '5d4caa6e0a42' -down_revision = 'bb42b7cad130' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.alter_column('bay', 'registry_trust_id', - new_column_name='trust_id', - existing_type=sa.String(255)) - op.add_column('bay', sa.Column('trustee_username', - sa.String(length=255), nullable=True)) - op.add_column('bay', sa.Column('trustee_user_id', - sa.String(length=255), nullable=True)) - op.add_column('bay', sa.Column('trustee_password', - sa.String(length=255), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/68ce16dfd341_add_master_lb_enabled_column_to_baymodel_table.py b/magnum/db/sqlalchemy/alembic/versions/68ce16dfd341_add_master_lb_enabled_column_to_baymodel_table.py deleted file mode 100644 index c9447d34..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/68ce16dfd341_add_master_lb_enabled_column_to_baymodel_table.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add master_lb_enabled column to baymodel table - -Revision ID: 68ce16dfd341 -Revises: 085e601a39f6 -Create Date: 2016-06-23 18:44:55.312413 - -""" - -# revision identifiers, used by Alembic. -revision = '68ce16dfd341' -down_revision = '085e601a39f6' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', - sa.Column('master_lb_enabled', sa.Boolean(), default=False)) diff --git a/magnum/db/sqlalchemy/alembic/versions/6f21dc920bb_add_cert_uuid_to_bay.py b/magnum/db/sqlalchemy/alembic/versions/6f21dc920bb_add_cert_uuid_to_bay.py deleted file mode 100644 index 02354a97..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/6f21dc920bb_add_cert_uuid_to_bay.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Add cert_uuid to bay - -Revision ID: 6f21dc920bb -Revises: 966a99e70ff -Create Date: 2015-08-19 13:57:14.863292 - -""" - -# revision identifiers, used by Alembic.
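# NOTE: 5d4caa6e0a42 above combines a column rename with three new
# columns; a hypothetical inverse (none is defined in the original) would
# undo both steps in reverse order:
from alembic import op
import sqlalchemy as sa

def downgrade():
    for col in ('trustee_password', 'trustee_user_id', 'trustee_username'):
        op.drop_column('bay', col)
    op.alter_column('bay', 'trust_id',
                    new_column_name='registry_trust_id',
                    existing_type=sa.String(255))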
-revision = '6f21dc920bb' -down_revision = '966a99e70ff' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column( - 'bay', - sa.Column('ca_cert_uuid', sa.String(length=36), nullable=True)) - op.add_column( - 'bay', - sa.Column('magnum_cert_uuid', sa.String(length=36), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/6f21dc998bb_add_master_addresses_to_bay.py b/magnum/db/sqlalchemy/alembic/versions/6f21dc998bb_add_master_addresses_to_bay.py deleted file mode 100644 index 590e9bd6..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/6f21dc998bb_add_master_addresses_to_bay.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Add master_addresses to bay - -Revision ID: 6f21dc998bb -Revises: 421102d1f2d2 -Create Date: 2015-08-20 13:57:14.863292 - -""" - -# revision identifiers, used by Alembic. -revision = '6f21dc998bb' -down_revision = '421102d1f2d2' - -from alembic import op -from magnum.db.sqlalchemy import models -import sqlalchemy as sa - - -def upgrade(): - op.add_column( - 'bay', - sa.Column('master_addresses', - models.JSONEncodedList(), - nullable=True) - ) diff --git a/magnum/db/sqlalchemy/alembic/versions/720f640f43d1_rename_bay_table_to_cluster.py b/magnum/db/sqlalchemy/alembic/versions/720f640f43d1_rename_bay_table_to_cluster.py deleted file mode 100644 index 2fed785c..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/720f640f43d1_rename_bay_table_to_cluster.py +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""rename bay table to cluster - -Revision ID: 720f640f43d1 -Revises: fb03fdef8919 -Create Date: 2016-09-02 09:43:41.485934 - -""" - -# revision identifiers, used by Alembic. 
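# NOTE: master_addresses above is typed with JSONEncodedList, imported
# from magnum.db.sqlalchemy.models. A minimal sketch of that
# TypeDecorator pattern -- the real class may differ in detail:
import json

from sqlalchemy import Text
from sqlalchemy.types import TypeDecorator

class JSONEncodedList(TypeDecorator):
    """Store a Python list as a JSON-encoded string in a Text column."""
    impl = Text

    def process_bind_param(self, value, dialect):
        return json.dumps(value) if value is not None else None

    def process_result_value(self, value, dialect):
        return json.loads(value) if value is not None else None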
-revision = '720f640f43d1' -down_revision = 'fb03fdef8919' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.alter_column('bay', 'baymodel_id', - new_column_name='cluster_template_id', - existing_type=sa.String(255)) - op.alter_column('bay', 'bay_create_timeout', - new_column_name='create_timeout', - existing_type=sa.Integer()) - op.rename_table('bay', 'cluster') diff --git a/magnum/db/sqlalchemy/alembic/versions/859fb45df249_remove_replication_controller.py b/magnum/db/sqlalchemy/alembic/versions/859fb45df249_remove_replication_controller.py deleted file mode 100644 index b16e87f0..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/859fb45df249_remove_replication_controller.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""remove replication controller - -Revision ID: 859fb45df249 -Revises: 1f196a3dabae -Create Date: 2016-08-09 13:46:24.052528 - -""" - -# revision identifiers, used by Alembic. -revision = '859fb45df249' -down_revision = '1f196a3dabae' - -from alembic import op - - -def upgrade(): - op.drop_table('replicationcontroller') diff --git a/magnum/db/sqlalchemy/alembic/versions/966a99e70ff_add_proxy.py b/magnum/db/sqlalchemy/alembic/versions/966a99e70ff_add_proxy.py deleted file mode 100644 index e0b74944..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/966a99e70ff_add_proxy.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add-proxy - -Revision ID: 966a99e70ff -Revises: 6f21dc998bb -Create Date: 2015-08-24 11:23:24.262921 - -""" - -# revision identifiers, used by Alembic. -revision = '966a99e70ff' -down_revision = '6f21dc998bb' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('http_proxy', - sa.String(length=255), nullable=True)) - op.add_column('baymodel', sa.Column('https_proxy', - sa.String(length=255), nullable=True)) - op.add_column('baymodel', sa.Column('no_proxy', - sa.String(length=255), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/a1136d335540_add_docker_storage_driver_column.py b/magnum/db/sqlalchemy/alembic/versions/a1136d335540_add_docker_storage_driver_column.py deleted file mode 100644 index edf10399..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/a1136d335540_add_docker_storage_driver_column.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Add docker storage driver column - -Revision ID: a1136d335540 -Revises: d072f58ab240 -Create Date: 2016-03-07 19:00:28.738486 - -""" - -# revision identifiers, used by Alembic. -revision = 'a1136d335540' -down_revision = 'd072f58ab240' - -from alembic import op -import sqlalchemy as sa - - -docker_storage_driver_enum = sa.Enum('devicemapper', 'overlay', - name='docker_storage_driver') - - -def upgrade(): - docker_storage_driver_enum.create(op.get_bind(), checkfirst=True) - op.add_column('baymodel', sa.Column('docker_storage_driver', - docker_storage_driver_enum, - nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/aa0cc27839af_add_docker_volume_size_to_cluster.py b/magnum/db/sqlalchemy/alembic/versions/aa0cc27839af_add_docker_volume_size_to_cluster.py deleted file mode 100644 index 75dcdc74..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/aa0cc27839af_add_docker_volume_size_to_cluster.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add docker_volume_size to cluster - -Revision ID: aa0cc27839af -Revises: bc46ba6cf949 -Create Date: 2017-06-07 13:08:02.853105 - -""" - -# revision identifiers, used by Alembic. -revision = 'aa0cc27839af' -down_revision = 'bc46ba6cf949' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('cluster', sa.Column('docker_volume_size', - sa.Integer(), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/adc3b7679ae_add_registry_trust_id_to_bay.py b/magnum/db/sqlalchemy/alembic/versions/adc3b7679ae_add_registry_trust_id_to_bay.py deleted file mode 100644 index c780d10f..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/adc3b7679ae_add_registry_trust_id_to_bay.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add registry_trust_id to bay - -Revision ID: adc3b7679ae -Revises: 40f325033343 -Create Date: 2015-12-07 15:49:07.622122 - -""" - -# revision identifiers, used by Alembic.
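# NOTE: a1136d335540 above must create the Enum type explicitly because
# PostgreSQL requires the type to exist before any column can use it;
# checkfirst=True keeps that creation idempotent. The hypothetical
# inverse drops the column first, then the now-unreferenced type
# (sketch only, not part of the original):
from alembic import op
import sqlalchemy as sa

def downgrade():
    op.drop_column('baymodel', 'docker_storage_driver')
    sa.Enum(name='docker_storage_driver').drop(op.get_bind(),
                                               checkfirst=True)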
-revision = 'adc3b7679ae' -down_revision = '40f325033343' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('bay', sa.Column('registry_trust_id', - sa.String(length=255), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/b1f612248cab_add_floating_ip_enabled_column_to_.py b/magnum/db/sqlalchemy/alembic/versions/b1f612248cab_add_floating_ip_enabled_column_to_.py deleted file mode 100644 index f90e5712..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/b1f612248cab_add_floating_ip_enabled_column_to_.py +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Add floating_ip_enabled column to baymodel table - -Revision ID: b1f612248cab -Revises: 859fb45df249 -Create Date: 2016-08-05 15:31:46.203266 - -""" - -# revision identifiers, used by Alembic. -revision = 'b1f612248cab' -down_revision = '859fb45df249' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', - sa.Column('floating_ip_enabled', - sa.Boolean(), - default=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/bb42b7cad130_remove_node_object.py b/magnum/db/sqlalchemy/alembic/versions/bb42b7cad130_remove_node_object.py deleted file mode 100644 index 53ff0769..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/bb42b7cad130_remove_node_object.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""remove node object - -Revision ID: bb42b7cad130 -Revises: 05d3e97de9ee -Create Date: 2016-02-02 16:04:36.501547 - -""" - -# revision identifiers, used by Alembic. -revision = 'bb42b7cad130' -down_revision = '05d3e97de9ee' - -from alembic import op - - -def upgrade(): - op.drop_table('node') diff --git a/magnum/db/sqlalchemy/alembic/versions/bc46ba6cf949_add_keypair_to_cluster.py b/magnum/db/sqlalchemy/alembic/versions/bc46ba6cf949_add_keypair_to_cluster.py deleted file mode 100644 index d40c265f..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/bc46ba6cf949_add_keypair_to_cluster.py +++ /dev/null @@ -1,32 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add keypair to cluster - -Revision ID: bc46ba6cf949 -Revises: 720f640f43d1 -Create Date: 2016-10-03 10:47:08.584635 - -""" - -# revision identifiers, used by Alembic. -revision = 'bc46ba6cf949' -down_revision = '720f640f43d1' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('cluster', sa.Column('keypair', sa.String(length=255), - nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/d072f58ab240_modify_x509keypair_table.py b/magnum/db/sqlalchemy/alembic/versions/d072f58ab240_modify_x509keypair_table.py deleted file mode 100644 index b6d8f837..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/d072f58ab240_modify_x509keypair_table.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2016 Intel Technologies India Pvt. Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""modify x509keypair table - -Revision ID: d072f58ab240 -Revises: ef08a5e057bd -Create Date: 2016-05-27 15:29:22.955268 - -""" - -# revision identifiers, used by Alembic. -revision = 'd072f58ab240' -down_revision = 'ef08a5e057bd' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.drop_column('x509keypair', 'bay_uuid') - op.drop_column('x509keypair', 'name') - op.drop_column('x509keypair', 'ca_cert') - op.add_column('x509keypair', sa.Column('intermediates', - sa.Text(), nullable=True)) - op.add_column('x509keypair', sa.Column('private_key_passphrase', - sa.Text(), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/e0653b2d5271_add_fixed_subnet_column_to_baymodel_table.py b/magnum/db/sqlalchemy/alembic/versions/e0653b2d5271_add_fixed_subnet_column_to_baymodel_table.py deleted file mode 100644 index 7dfaffd4..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/e0653b2d5271_add_fixed_subnet_column_to_baymodel_table.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Add fixed_subnet column to baymodel table - -Revision ID: e0653b2d5271 -Revises: 68ce16dfd341 -Create Date: 2016-06-29 14:14:37.862594 - -""" - -# revision identifiers, used by Alembic.
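# NOTE: d072f58ab240 above drops x509keypair columns outright, so no
# downgrade could recover the data -- at best it restores the shape of
# the schema. A hypothetical sketch, not part of the original:
from alembic import op
import sqlalchemy as sa

def downgrade():
    op.drop_column('x509keypair', 'private_key_passphrase')
    op.drop_column('x509keypair', 'intermediates')
    op.add_column('x509keypair', sa.Column('ca_cert', sa.Text()))
    op.add_column('x509keypair', sa.Column('name', sa.String(length=255),
                                           nullable=True))
    op.add_column('x509keypair', sa.Column('bay_uuid', sa.String(length=36),
                                           nullable=True))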
-revision = 'e0653b2d5271' -down_revision = '68ce16dfd341' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('fixed_subnet', - sa.String(length=255), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/e647f5931da8_add_insecure_registry_to_baymodel.py b/magnum/db/sqlalchemy/alembic/versions/e647f5931da8_add_insecure_registry_to_baymodel.py deleted file mode 100644 index a993f57c..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/e647f5931da8_add_insecure_registry_to_baymodel.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add insecure_registry to baymodel - -Revision ID: e647f5931da8 -Revises: 049f81f6f584 -Create Date: 2016-03-28 09:08:07.467102 - -""" - -# revision identifiers, used by Alembic. -revision = 'e647f5931da8' -down_revision = '049f81f6f584' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('baymodel', sa.Column('insecure_registry', - sa.String(length=255), nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/e772b2598d9_add_container_command.py b/magnum/db/sqlalchemy/alembic/versions/e772b2598d9_add_container_command.py deleted file mode 100644 index 66859c91..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/e772b2598d9_add_container_command.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add-container-command - -Revision ID: e772b2598d9 -Revises: 4ea34a59a64c -Create Date: 2015-04-17 18:59:52.770329 - -""" - -# revision identifiers, used by Alembic. -revision = 'e772b2598d9' -down_revision = '4ea34a59a64c' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('container', - sa.Column('command', sa.String(length=255), - nullable=True)) diff --git a/magnum/db/sqlalchemy/alembic/versions/ee92b41b8809_create_quotas_table.py b/magnum/db/sqlalchemy/alembic/versions/ee92b41b8809_create_quotas_table.py deleted file mode 100644 index 7c444216..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/ee92b41b8809_create_quotas_table.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2016 Yahoo! Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Introduce Quotas - -Revision ID: ee92b41b8809 -Revises: 5d4caa6e0a42 -Create Date: 2016-02-26 18:32:08.992964 - -""" - -# revision identifiers, used by Alembic. -revision = 'ee92b41b8809' -down_revision = '5d4caa6e0a42' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'quotas', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('project_id', sa.String(length=255), nullable=True), - sa.Column('resource', sa.String(length=255), nullable=True), - sa.Column('hard_limit', sa.Integer(), nullable=False), - sa.PrimaryKeyConstraint('id'), - mysql_ENGINE='InnoDB', - mysql_DEFAULT_CHARSET='UTF8' - ) - op.create_unique_constraint( - "uniq_quotas0project_id0resource", - "quotas", ["project_id", "resource"]) diff --git a/magnum/db/sqlalchemy/alembic/versions/ef08a5e057bd_remove_pod.py b/magnum/db/sqlalchemy/alembic/versions/ef08a5e057bd_remove_pod.py deleted file mode 100644 index 82a9a9cf..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/ef08a5e057bd_remove_pod.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""remove pod object - -Revision ID: ef08a5e057bd -Revises: e647f5931da8 -Create Date: 2016-05-24 13:52:39.782156 - -""" - -# revision identifiers, used by Alembic. -revision = 'ef08a5e057bd' -down_revision = 'e647f5931da8' - -from alembic import op - - -def upgrade(): - op.drop_table('pod') diff --git a/magnum/db/sqlalchemy/alembic/versions/fb03fdef8919_rename_baymodel_to_clustertemplate.py b/magnum/db/sqlalchemy/alembic/versions/fb03fdef8919_rename_baymodel_to_clustertemplate.py deleted file mode 100644 index 9a7f1b1e..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/fb03fdef8919_rename_baymodel_to_clustertemplate.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""rename_baymodel_to_clustertemplate - -Revision ID: fb03fdef8919 -Revises: fcb4efee8f8b -Create Date: 2016-08-31 12:40:31.165817 - -""" - -# revision identifiers, used by Alembic. 
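# NOTE: table renames such as fb03fdef8919 just below are symmetric; the
# hypothetical inverse (not defined in the original) is simply:
from alembic import op

def downgrade():
    op.rename_table('cluster_template', 'baymodel')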
-revision = 'fb03fdef8919' -down_revision = 'fcb4efee8f8b' - -from alembic import op - - -def upgrade(): - op.rename_table('baymodel', 'cluster_template') diff --git a/magnum/db/sqlalchemy/alembic/versions/fcb4efee8f8b_add_version_info_to_bay.py b/magnum/db/sqlalchemy/alembic/versions/fcb4efee8f8b_add_version_info_to_bay.py deleted file mode 100644 index f58ff70e..00000000 --- a/magnum/db/sqlalchemy/alembic/versions/fcb4efee8f8b_add_version_info_to_bay.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""add version info to bay - -Revision ID: fcb4efee8f8b -Revises: b1f612248cab -Create Date: 2016-08-22 15:04:32.256811 - -""" - -# revision identifiers, used by Alembic. -revision = 'fcb4efee8f8b' -down_revision = 'b1f612248cab' - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('bay', - sa.Column('coe_version', sa.String(length=255), - nullable=True)) - op.add_column('bay', - sa.Column('container_version', sa.String(length=255), - nullable=True)) diff --git a/magnum/db/sqlalchemy/api.py b/magnum/db/sqlalchemy/api.py deleted file mode 100644 index 8898239c..00000000 --- a/magnum/db/sqlalchemy/api.py +++ /dev/null @@ -1,636 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
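# NOTE on the module that follows: it exposes the storage backend through
# a single lazily-created oslo.db EngineFacade. Typical caller code would
# look roughly like this sketch (names taken from the module below;
# treating it as importable here is an assumption):
from magnum.db.sqlalchemy import api as db_api
from magnum.db.sqlalchemy import models

session = db_api.get_session()
clusters = db_api.model_query(models.Cluster, session=session).all()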
- -"""SQLAlchemy storage backend.""" - -from oslo_db import exception as db_exc -from oslo_db.sqlalchemy import session as db_session -from oslo_db.sqlalchemy import utils as db_utils -from oslo_utils import importutils -from oslo_utils import strutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -import sqlalchemy as sa -from sqlalchemy.orm.exc import MultipleResultsFound -from sqlalchemy.orm.exc import NoResultFound -from sqlalchemy.sql import func - -from magnum.common import clients -from magnum.common import context as request_context -from magnum.common import exception -import magnum.conf -from magnum.db import api -from magnum.db.sqlalchemy import models -from magnum.i18n import _ - -profiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') - -CONF = magnum.conf.CONF - - -_FACADE = None - - -def _create_facade_lazily(): - global _FACADE - if _FACADE is None: - _FACADE = db_session.EngineFacade.from_config(CONF) - if profiler_sqlalchemy: - if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy: - profiler_sqlalchemy.add_tracing(sa, _FACADE.get_engine(), "db") - - return _FACADE - - -def get_engine(): - facade = _create_facade_lazily() - return facade.get_engine() - - -def get_session(**kwargs): - facade = _create_facade_lazily() - return facade.get_session(**kwargs) - - -def get_backend(): - """The backend is this module itself.""" - return Connection() - - -def model_query(model, *args, **kwargs): - """Query helper for simpler session usage. - - :param session: if present, the session to use - """ - - session = kwargs.get('session') or get_session() - query = session.query(model, *args) - return query - - -def add_identity_filter(query, value): - """Adds an identity filter to a query. - - Filters results by ID, if supplied value is a valid integer. - Otherwise attempts to filter results by UUID. - - :param query: Initial query to add filter to. - :param value: Value for filtering results by. - :return: Modified query. - """ - if strutils.is_int_like(value): - return query.filter_by(id=value) - elif uuidutils.is_uuid_like(value): - return query.filter_by(uuid=value) - else: - raise exception.InvalidIdentity(identity=value) - - -def _paginate_query(model, limit=None, marker=None, sort_key=None, - sort_dir=None, query=None): - if not query: - query = model_query(model) - sort_keys = ['id'] - if sort_key and sort_key not in sort_keys: - sort_keys.insert(0, sort_key) - try: - query = db_utils.paginate_query(query, model, limit, sort_keys, - marker=marker, sort_dir=sort_dir) - except db_exc.InvalidSortKey: - raise exception.InvalidParameterValue( - _('The sort_key value "%(key)s" is an invalid field for sorting') - % {'key': sort_key}) - return query.all() - - -class Connection(api.Connection): - """SqlAlchemy connection.""" - - def __init__(self): - pass - - def _add_tenant_filters(self, context, query): - if context.is_admin and context.all_tenants: - return query - - admin_context = request_context.make_admin_context(all_tenants=True) - osc = clients.OpenStackClients(admin_context) - kst = osc.keystone() - - # User in a regular project (not in the trustee domain) - if context.project_id and context.domain_id != kst.trustee_domain_id: - query = query.filter_by(project_id=context.project_id) - # Match project ID component in trustee user's user name against - # cluster's project_id to associate per-cluster trustee users who have - # no project information with the project their clusters/cluster models - # reside in. 
This is equivalent to the project filtering above. - elif context.domain_id == kst.trustee_domain_id: - user_name = kst.client.users.get(context.user_id).name - user_project = user_name.split('_', 2)[1] - query = query.filter_by(project_id=user_project) - else: - query = query.filter_by(user_id=context.user_id) - - return query - - def _add_clusters_filters(self, query, filters): - if filters is None: - filters = {} - - possible_filters = ["cluster_template_id", "name", "node_count", - "master_count", "stack_id", "api_address", - "node_addresses", "project_id", "user_id"] - - filter_names = set(filters).intersection(possible_filters) - filter_dict = {filter_name: filters[filter_name] - for filter_name in filter_names} - - query = query.filter_by(**filter_dict) - - if 'status' in filters: - query = query.filter(models.Cluster.status.in_(filters['status'])) - - return query - - def get_cluster_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.Cluster) - query = self._add_tenant_filters(context, query) - query = self._add_clusters_filters(query, filters) - return _paginate_query(models.Cluster, limit, marker, - sort_key, sort_dir, query) - - def create_cluster(self, values): - # ensure defaults are present for new clusters - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - - cluster = models.Cluster() - cluster.update(values) - try: - cluster.save() - except db_exc.DBDuplicateEntry: - raise exception.ClusterAlreadyExists(uuid=values['uuid']) - return cluster - - def get_cluster_by_id(self, context, cluster_id): - query = model_query(models.Cluster) - query = self._add_tenant_filters(context, query) - query = query.filter_by(id=cluster_id) - try: - return query.one() - except NoResultFound: - raise exception.ClusterNotFound(cluster=cluster_id) - - def get_cluster_by_name(self, context, cluster_name): - query = model_query(models.Cluster) - query = self._add_tenant_filters(context, query) - query = query.filter_by(name=cluster_name) - try: - return query.one() - except MultipleResultsFound: - raise exception.Conflict('Multiple clusters exist with same name.' 
- ' Please use the cluster uuid instead.') - except NoResultFound: - raise exception.ClusterNotFound(cluster=cluster_name) - - def get_cluster_by_uuid(self, context, cluster_uuid): - query = model_query(models.Cluster) - query = self._add_tenant_filters(context, query) - query = query.filter_by(uuid=cluster_uuid) - try: - return query.one() - except NoResultFound: - raise exception.ClusterNotFound(cluster=cluster_uuid) - - def get_cluster_stats(self, context, project_id=None): - query = model_query(models.Cluster) - node_count_col = models.Cluster.node_count - master_count_col = models.Cluster.master_count - ncfunc = func.sum(node_count_col + master_count_col) - - if project_id: - query = query.filter_by(project_id=project_id) - nquery = query.session.query(ncfunc.label("nodes")).filter_by( - project_id=project_id) - else: - nquery = query.session.query(ncfunc.label("nodes")) - - clusters = query.count() - nodes = int(nquery.one()[0]) if nquery.one()[0] else 0 - return clusters, nodes - - def get_cluster_count_all(self, context, filters=None): - query = model_query(models.Cluster) - query = self._add_tenant_filters(context, query) - query = self._add_clusters_filters(query, filters) - return query.count() - - def destroy_cluster(self, cluster_id): - session = get_session() - with session.begin(): - query = model_query(models.Cluster, session=session) - query = add_identity_filter(query, cluster_id) - - try: - query.one() - except NoResultFound: - raise exception.ClusterNotFound(cluster=cluster_id) - - query.delete() - - def update_cluster(self, cluster_id, values): - # NOTE(dtantsur): this can lead to very strange errors - if 'uuid' in values: - msg = _("Cannot overwrite UUID for an existing Cluster.") - raise exception.InvalidParameterValue(err=msg) - - return self._do_update_cluster(cluster_id, values) - - def _do_update_cluster(self, cluster_id, values): - session = get_session() - with session.begin(): - query = model_query(models.Cluster, session=session) - query = add_identity_filter(query, cluster_id) - try: - ref = query.with_lockmode('update').one() - except NoResultFound: - raise exception.ClusterNotFound(cluster=cluster_id) - - ref.update(values) - return ref - - def _add_cluster_template_filters(self, query, filters): - if filters is None: - filters = {} - - possible_filters = ["name", "image_id", "flavor_id", - "master_flavor_id", "keypair_id", - "external_network_id", "dns_nameserver", - "project_id", "user_id", "labels"] - - filter_names = set(filters).intersection(possible_filters) - filter_dict = {filter_name: filters[filter_name] - for filter_name in filter_names} - - return query.filter_by(**filter_dict) - - def get_cluster_template_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None): - query = model_query(models.ClusterTemplate) - query = self._add_tenant_filters(context, query) - query = self._add_cluster_template_filters(query, filters) - # include public ClusterTemplates - public_q = model_query(models.ClusterTemplate).filter_by(public=True) - query = query.union(public_q) - - return _paginate_query(models.ClusterTemplate, limit, marker, - sort_key, sort_dir, query) - - def create_cluster_template(self, values): - # ensure defaults are present for new ClusterTemplates - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - - cluster_template = models.ClusterTemplate() - cluster_template.update(values) - try: - cluster_template.save() - except db_exc.DBDuplicateEntry: - raise 
exception.ClusterTemplateAlreadyExists(uuid=values['uuid']) - return cluster_template - - def get_cluster_template_by_id(self, context, cluster_template_id): - query = model_query(models.ClusterTemplate) - query = self._add_tenant_filters(context, query) - public_q = model_query(models.ClusterTemplate).filter_by(public=True) - query = query.union(public_q) - query = query.filter_by(id=cluster_template_id) - try: - return query.one() - except NoResultFound: - raise exception.ClusterTemplateNotFound( - clustertemplate=cluster_template_id) - - def get_cluster_template_by_uuid(self, context, cluster_template_uuid): - query = model_query(models.ClusterTemplate) - query = self._add_tenant_filters(context, query) - public_q = model_query(models.ClusterTemplate).filter_by(public=True) - query = query.union(public_q) - query = query.filter_by(uuid=cluster_template_uuid) - try: - return query.one() - except NoResultFound: - raise exception.ClusterTemplateNotFound( - clustertemplate=cluster_template_uuid) - - def get_cluster_template_by_name(self, context, cluster_template_name): - query = model_query(models.ClusterTemplate) - query = self._add_tenant_filters(context, query) - public_q = model_query(models.ClusterTemplate).filter_by(public=True) - query = query.union(public_q) - query = query.filter_by(name=cluster_template_name) - try: - return query.one() - except MultipleResultsFound: - raise exception.Conflict('Multiple ClusterTemplates exist with' - ' the same name. Please use the ' - 'ClusterTemplate uuid instead.') - except NoResultFound: - raise exception.ClusterTemplateNotFound( - clustertemplate=cluster_template_name) - - def _is_cluster_template_referenced(self, session, cluster_template_uuid): - """Checks whether the ClusterTemplate is referenced by cluster(s).""" - query = model_query(models.Cluster, session=session) - query = self._add_clusters_filters(query, {'cluster_template_id': - cluster_template_uuid}) - return query.count() != 0 - - def _is_publishing_cluster_template(self, values): - if (len(values) == 1 and - 'public' in values and values['public'] is True): - return True - return False - - def destroy_cluster_template(self, cluster_template_id): - session = get_session() - with session.begin(): - query = model_query(models.ClusterTemplate, session=session) - query = add_identity_filter(query, cluster_template_id) - - try: - cluster_template_ref = query.one() - except NoResultFound: - raise exception.ClusterTemplateNotFound( - clustertemplate=cluster_template_id) - - if self._is_cluster_template_referenced( - session, cluster_template_ref['uuid']): - raise exception.ClusterTemplateReferenced( - clustertemplate=cluster_template_id) - - query.delete() - - def update_cluster_template(self, cluster_template_id, values): - # NOTE(dtantsur): this can lead to very strange errors - if 'uuid' in values: - msg = _("Cannot overwrite UUID for an existing ClusterTemplate.") - raise exception.InvalidParameterValue(err=msg) - - return self._do_update_cluster_template(cluster_template_id, values) - - def _do_update_cluster_template(self, cluster_template_id, values): - session = get_session() - with session.begin(): - query = model_query(models.ClusterTemplate, session=session) - query = add_identity_filter(query, cluster_template_id) - try: - ref = query.with_lockmode('update').one() - except NoResultFound: - raise exception.ClusterTemplateNotFound( - clustertemplate=cluster_template_id) - - if self._is_cluster_template_referenced(session, ref['uuid']): - # we only allow updating a ClusterTemplate
to be public - if not self._is_publishing_cluster_template(values): - raise exception.ClusterTemplateReferenced( - clustertemplate=cluster_template_id) - - ref.update(values) - return ref - - def create_x509keypair(self, values): - # ensure defaults are present for new x509keypairs - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - - x509keypair = models.X509KeyPair() - x509keypair.update(values) - try: - x509keypair.save() - except db_exc.DBDuplicateEntry: - raise exception.X509KeyPairAlreadyExists(uuid=values['uuid']) - return x509keypair - - def get_x509keypair_by_id(self, context, x509keypair_id): - query = model_query(models.X509KeyPair) - query = self._add_tenant_filters(context, query) - query = query.filter_by(id=x509keypair_id) - try: - return query.one() - except NoResultFound: - raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id) - - def get_x509keypair_by_uuid(self, context, x509keypair_uuid): - query = model_query(models.X509KeyPair) - query = self._add_tenant_filters(context, query) - query = query.filter_by(uuid=x509keypair_uuid) - try: - return query.one() - except NoResultFound: - raise exception.X509KeyPairNotFound(x509keypair=x509keypair_uuid) - - def destroy_x509keypair(self, x509keypair_id): - session = get_session() - with session.begin(): - query = model_query(models.X509KeyPair, session=session) - query = add_identity_filter(query, x509keypair_id) - count = query.delete() - if count != 1: - raise exception.X509KeyPairNotFound(x509keypair_id) - - def update_x509keypair(self, x509keypair_id, values): - # NOTE(dtantsur): this can lead to very strange errors - if 'uuid' in values: - msg = _("Cannot overwrite UUID for an existing X509KeyPair.") - raise exception.InvalidParameterValue(err=msg) - - return self._do_update_x509keypair(x509keypair_id, values) - - def _do_update_x509keypair(self, x509keypair_id, values): - session = get_session() - with session.begin(): - query = model_query(models.X509KeyPair, session=session) - query = add_identity_filter(query, x509keypair_id) - try: - ref = query.with_lockmode('update').one() - except NoResultFound: - raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id) - - ref.update(values) - return ref - - def _add_x509keypairs_filters(self, query, filters): - if filters is None: - filters = {} - - if 'project_id' in filters: - query = query.filter_by(project_id=filters['project_id']) - if 'user_id' in filters: - query = query.filter_by(user_id=filters['user_id']) - - return query - - def get_x509keypair_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None): - query = model_query(models.X509KeyPair) - query = self._add_tenant_filters(context, query) - query = self._add_x509keypairs_filters(query, filters) - return _paginate_query(models.X509KeyPair, limit, marker, - sort_key, sort_dir, query) - - def destroy_magnum_service(self, magnum_service_id): - session = get_session() - with session.begin(): - query = model_query(models.MagnumService, session=session) - query = add_identity_filter(query, magnum_service_id) - count = query.delete() - if count != 1: - raise exception.MagnumServiceNotFound( - magnum_service_id=magnum_service_id) - - def update_magnum_service(self, magnum_service_id, values): - session = get_session() - with session.begin(): - query = model_query(models.MagnumService, session=session) - query = add_identity_filter(query, magnum_service_id) - try: - ref = query.with_lockmode('update').one() - except NoResultFound: - raise 
exception.MagnumServiceNotFound( - magnum_service_id=magnum_service_id) - - if 'report_count' in values: - if values['report_count'] > ref.report_count: - ref.last_seen_up = timeutils.utcnow() - - ref.update(values) - return ref - - def get_magnum_service_by_host_and_binary(self, host, binary): - query = model_query(models.MagnumService) - query = query.filter_by(host=host, binary=binary) - try: - return query.one() - except NoResultFound: - return None - - def create_magnum_service(self, values): - magnum_service = models.MagnumService() - magnum_service.update(values) - try: - magnum_service.save() - except db_exc.DBDuplicateEntry: - raise exception.MagnumServiceAlreadyExists(id=magnum_service['id']) - return magnum_service - - def get_magnum_service_list(self, disabled=None, limit=None, - marker=None, sort_key=None, sort_dir=None - ): - query = model_query(models.MagnumService) - if disabled: - query = query.filter_by(disabled=disabled) - - return _paginate_query(models.MagnumService, limit, marker, - sort_key, sort_dir, query) - - def create_quota(self, values): - quotas = models.Quota() - quotas.update(values) - try: - quotas.save() - except db_exc.DBDuplicateEntry: - raise exception.QuotaAlreadyExists(project_id=values['project_id'], - resource=values['resource']) - return quotas - - def _add_quota_filters(self, query, filters): - if filters is None: - filters = {} - - possible_filters = ["resource", "project_id"] - - filter_names = set(filters).intersection(possible_filters) - filter_dict = {filter_name: filters[filter_name] - for filter_name in filter_names} - - query = query.filter_by(**filter_dict) - return query - - def get_quota_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.Quota) - query = self._add_quota_filters(query, filters) - return _paginate_query(models.Quota, limit, marker, - sort_key, sort_dir, query) - - def update_quota(self, project_id, values): - session = get_session() - with session.begin(): - query = model_query(models.Quota, session=session) - resource = values['resource'] - try: - query = query.filter_by(project_id=project_id).filter_by( - resource=resource) - ref = query.with_lockmode('update').one() - except NoResultFound: - msg = (_('project_id %(project_id)s resource %(resource)s.') % - {'project_id': project_id, 'resource': resource}) - raise exception.QuotaNotFound(msg=msg) - - ref.update(values) - return ref - - def delete_quota(self, project_id, resource): - session = get_session() - with session.begin(): - query = model_query(models.Quota, session=session) - - try: - query.filter_by(project_id=project_id).filter_by( - resource=resource).one() - except NoResultFound: - msg = (_('project_id %(project_id)s resource %(resource)s.') % - {'project_id': project_id, 'resource': resource}) - raise exception.QuotaNotFound(msg=msg) - - query.delete() - - def get_quota_by_id(self, context, quota_id): - query = model_query(models.Quota) - query = query.filter_by(id=quota_id) - try: - return query.one() - except NoResultFound: - msg = _('quota id %s .') % quota_id - raise exception.QuotaNotFound(msg=msg) - - def quota_get_all_by_project_id(self, project_id): - query = model_query(models.Quota) - result = query.filter_by(project_id=project_id).all() - - return result - - def get_quota_by_project_id_resource(self, project_id, resource): - query = model_query(models.Quota) - query = query.filter_by(project_id=project_id).filter_by( - resource=resource) - - try: - return query.one() - except 
NoResultFound: - msg = (_('project_id %(project_id)s resource %(resource)s.') % - {'project_id': project_id, 'resource': resource}) - raise exception.QuotaNotFound(msg=msg) diff --git a/magnum/db/sqlalchemy/migration.py b/magnum/db/sqlalchemy/migration.py deleted file mode 100644 index cfbe6e89..00000000 --- a/magnum/db/sqlalchemy/migration.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_db.sqlalchemy.migration_cli import manager - -import magnum.conf - -CONF = magnum.conf.CONF -_MANAGER = None - - -def get_manager(): - global _MANAGER - if not _MANAGER: - alembic_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), 'alembic.ini')) - migrate_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), 'alembic')) - migration_config = {'alembic_ini_path': alembic_path, - 'alembic_repo_path': migrate_path, - 'db_url': CONF.database.connection} - _MANAGER = manager.MigrationManager(migration_config) - - return _MANAGER - - -def version(): - """Current database version. - - :returns: Database version - :rtype: string - """ - return get_manager().version() - - -def upgrade(version): - """Used for upgrading database. - - :param version: Desired database version - :type version: string - """ - version = version or 'head' - - get_manager().upgrade(version) - - -def stamp(revision): - """Stamps database with provided revision. - - Don't run any migrations. - - :param revision: Should match one from repository or head - to stamp - database with most recent revision - :type revision: string - """ - get_manager().stamp(revision) - - -def revision(message=None, autogenerate=False): - """Creates template for migration. - - :param message: Text that will be used for migration title - :type message: string - :param autogenerate: If True - generates diff based on current database - state - :type autogenerate: bool - """ - return get_manager().revision(message=message, autogenerate=autogenerate) diff --git a/magnum/db/sqlalchemy/models.py b/magnum/db/sqlalchemy/models.py deleted file mode 100644 index 6e545bed..00000000 --- a/magnum/db/sqlalchemy/models.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
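A short usage sketch for the migration helpers above, assuming
CONF.database.connection is already configured (normally a db-manage style
command does this before calling in):

    # Illustrative only: upgrade to the newest revision, then report it.
    from magnum.db.sqlalchemy import migration

    migration.upgrade(None)       # None falls back to 'head' inside upgrade()
    print(migration.version())    # current Alembic revision string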
- -""" -SQLAlchemy models for container service -""" - -import json - -from oslo_db.sqlalchemy import models -import six.moves.urllib.parse as urlparse -from sqlalchemy import Boolean -from sqlalchemy import Column -from sqlalchemy import DateTime -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import Integer -from sqlalchemy import schema -from sqlalchemy import String -from sqlalchemy import Text -from sqlalchemy.types import TypeDecorator, TEXT - -import magnum.conf - -CONF = magnum.conf.CONF - - -def table_args(): - engine_name = urlparse.urlparse(CONF.database.connection).scheme - if engine_name == 'mysql': - return {'mysql_engine': CONF.database.mysql_engine, - 'mysql_charset': "utf8"} - return None - - -class JsonEncodedType(TypeDecorator): - """Abstract base type serialized as json-encoded string in db.""" - type = None - impl = TEXT - - def process_bind_param(self, value, dialect): - if value is None: - # Save default value according to current type to keep the - # interface the consistent. - value = self.type() - elif not isinstance(value, self.type): - raise TypeError("%(class)s supposes to store " - "%(type)s objects, but %(value)s " - "given" % {'class': self.__class__.__name__, - 'type': self.type.__name__, - 'value': type(value).__name__}) - serialized_value = json.dumps(value) - return serialized_value - - def process_result_value(self, value, dialect): - if value is not None: - value = json.loads(value) - return value - - -class JSONEncodedDict(JsonEncodedType): - """Represents dict serialized as json-encoded string in db.""" - type = dict - - -class JSONEncodedList(JsonEncodedType): - """Represents list serialized as json-encoded string in db.""" - type = list - - -class MagnumBase(models.TimestampMixin, - models.ModelBase): - - metadata = None - - def as_dict(self): - d = {} - for c in self.__table__.columns: - d[c.name] = self[c.name] - return d - - def save(self, session=None): - import magnum.db.sqlalchemy.api as db_api - - if session is None: - session = db_api.get_session() - - super(MagnumBase, self).save(session) - -Base = declarative_base(cls=MagnumBase) - - -class Cluster(Base): - """Represents a Cluster.""" - - __tablename__ = 'cluster' - __table_args__ = ( - schema.UniqueConstraint('uuid', name='uniq_bay0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True) - project_id = Column(String(255)) - user_id = Column(String(255)) - uuid = Column(String(36)) - name = Column(String(255)) - cluster_template_id = Column(String(255)) - keypair = Column(String(255)) - docker_volume_size = Column(Integer()) - stack_id = Column(String(255)) - api_address = Column(String(255)) - node_addresses = Column(JSONEncodedList) - node_count = Column(Integer()) - master_count = Column(Integer()) - status = Column(String(20)) - status_reason = Column(Text) - create_timeout = Column(Integer()) - discovery_url = Column(String(255)) - master_addresses = Column(JSONEncodedList) - # TODO(wanghua): encrypt trust_id in db - trust_id = Column(String(255)) - trustee_username = Column(String(255)) - trustee_user_id = Column(String(255)) - # TODO(wanghua): encrypt trustee_password in db - trustee_password = Column(String(255)) - coe_version = Column(String(255)) - container_version = Column(String(255)) - # (yuanying) if we use barbican, - # cert_ref size is determined by below format - # * http(s)://${DOMAIN_NAME}/v1/containers/${UUID} - # as a result, cert_ref length is estimated to 312 chars. - # but we can use another backend to store certs. 
- # so, we use 512 chars to get some buffer. - ca_cert_ref = Column(String(512)) - magnum_cert_ref = Column(String(512)) - - -class ClusterTemplate(Base): - """Represents a ClusterTemplate.""" - - __tablename__ = 'cluster_template' - __table_args__ = ( - schema.UniqueConstraint('uuid', name='uniq_baymodel0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True) - uuid = Column(String(36)) - project_id = Column(String(255)) - user_id = Column(String(255)) - name = Column(String(255)) - image_id = Column(String(255)) - flavor_id = Column(String(255)) - master_flavor_id = Column(String(255)) - keypair_id = Column(String(255)) - external_network_id = Column(String(255)) - fixed_network = Column(String(255)) - fixed_subnet = Column(String(255)) - network_driver = Column(String(255)) - volume_driver = Column(String(255)) - dns_nameserver = Column(String(255)) - apiserver_port = Column(Integer()) - docker_volume_size = Column(Integer()) - docker_storage_driver = Column(String(255)) - cluster_distro = Column(String(255)) - coe = Column(String(255)) - http_proxy = Column(String(255)) - https_proxy = Column(String(255)) - no_proxy = Column(String(255)) - registry_enabled = Column(Boolean, default=False) - labels = Column(JSONEncodedDict) - tls_disabled = Column(Boolean, default=False) - public = Column(Boolean, default=False) - server_type = Column(String(255)) - insecure_registry = Column(String(255)) - master_lb_enabled = Column(Boolean, default=False) - floating_ip_enabled = Column(Boolean, default=True) - - -class X509KeyPair(Base): - """X509KeyPair""" - __tablename__ = 'x509keypair' - __table_args__ = ( - schema.UniqueConstraint('uuid', - name='uniq_x509keypair0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True) - uuid = Column(String(36)) - certificate = Column(Text()) - private_key = Column(Text()) - private_key_passphrase = Column(Text()) - intermediates = Column(Text()) - project_id = Column(String(255)) - user_id = Column(String(255)) - - -class MagnumService(Base): - """Represents health status of various magnum services""" - __tablename__ = 'magnum_service' - __table_args__ = ( - schema.UniqueConstraint("host", "binary", - name="uniq_magnum_service0host0binary"), - table_args() - ) - - id = Column(Integer, primary_key=True) - host = Column(String(255)) - binary = Column(String(255)) - disabled = Column(Boolean, default=False) - disabled_reason = Column(String(255)) - last_seen_up = Column(DateTime, nullable=True) - forced_down = Column(Boolean, default=False) - report_count = Column(Integer, nullable=False, default=0) - - -class Quota(Base): - """Represents Quota for a resource within a project""" - __tablename__ = 'quotas' - __table_args__ = ( - schema.UniqueConstraint( - "project_id", "resource", - name='uniq_quotas0project_id0resource'), - table_args() - ) - id = Column(Integer, primary_key=True) - project_id = Column(String(255)) - resource = Column(String(255)) - hard_limit = Column(Integer()) diff --git a/magnum/drivers/__init__.py b/magnum/drivers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/drivers/common/__init__.py b/magnum/drivers/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/drivers/common/driver.py b/magnum/drivers/common/driver.py deleted file mode 100644 index 92062047..00000000 --- a/magnum/drivers/common/driver.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright 2014 NEC Corporation. All rights reserved. 
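The JSON-encoded column types above round-trip plain Python containers
through a single TEXT column; a self-contained sketch of what
JSONEncodedList does on bind and fetch (no session required):

    import json

    # process_bind_param serializes on the way into the TEXT column ...
    bound = json.dumps(["10.0.0.3", "10.0.0.4"])
    # ... and process_result_value restores the list on the way back out.
    assert json.loads(bound) == ["10.0.0.3", "10.0.0.4"]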
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import six - -from oslo_config import cfg -from pkg_resources import iter_entry_points -from stevedore import driver - -from magnum.common import exception -from magnum.objects import cluster_template - - -CONF = cfg.CONF - - -@six.add_metaclass(abc.ABCMeta) -class Driver(object): - - definitions = None - - @classmethod - def load_entry_points(cls): - for entry_point in iter_entry_points('magnum.drivers'): - yield entry_point, entry_point.load(require=False) - - @classmethod - def get_drivers(cls): - '''Retrieves cluster drivers from python entry_points. - - Example: - - With the following classes: - class Driver1(Driver): - provides = [ - ('server_type1', 'os1', 'coe1') - ] - - class Driver2(Driver): - provides = [ - ('server_type2', 'os2', 'coe2') - ] - - And the following entry_points: - - magnum.drivers = - driver_name_1 = some.python.path:Driver1 - driver_name_2 = some.python.path:Driver2 - - get_drivers will return: - { - (server_type1, os1, coe1): - {'driver_name_1': Driver1}, - (server_type2, os2, coe2): - {'driver_name_2': Driver2} - } - - :return: dict - ''' - - if not cls.definitions: - cls.definitions = dict() - for entry_point, def_class in cls.load_entry_points(): - for cluster_type in def_class().provides: - cluster_type_tuple = (cluster_type['server_type'], - cluster_type['os'], - cluster_type['coe']) - providers = cls.definitions.setdefault(cluster_type_tuple, - dict()) - providers['entry_point_name'] = entry_point.name - providers['class'] = def_class - - return cls.definitions - - @classmethod - def get_driver(cls, server_type, os, coe): - '''Get Driver. - - Returns the Driver class for the provided cluster_type. - - With the following classes: - class Driver1(Driver): - provides = [ - ('server_type1', 'os1', 'coe1') - ] - - class Driver2(Driver): - provides = [ - ('server_type2', 'os2', 'coe2') - ] - - And the following entry_points: - - magnum.drivers = - driver_name_1 = some.python.path:Driver1 - driver_name_2 = some.python.path:Driver2 - - get_driver('server_type2', 'os2', 'coe2') - will return: Driver2 - - :param server_type: The server_type the cluster definition will build - on - :param os: The operating system the cluster definition will build on - :param coe: The Container Orchestration Environment the cluster will - produce - - :return: class - ''' - - definition_map = cls.get_drivers() - cluster_type = (server_type, os, coe) - - if cluster_type not in definition_map: - raise exception.ClusterTypeNotSupported( - server_type=server_type, - os=os, - coe=coe) - driver_info = definition_map[cluster_type] - # TODO(muralia): once --drivername is supported as an input during - # cluster create, change the following line to use driver name for - # loading. 
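# stevedore resolves the named entry point registered under the
# "magnum.drivers" namespace and instantiates its class; with the sample
# entry points from the docstring above this is equivalent to
#     driver.DriverManager("magnum.drivers", "driver_name_1").driver()
# ("driver_name_1" is the illustrative name, not a shipped driver).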
- return driver.DriverManager("magnum.drivers", - driver_info['entry_point_name']).driver() - - @classmethod - def get_driver_for_cluster(cls, context, cluster): - ct = cluster_template.ClusterTemplate.get_by_uuid( - context, cluster.cluster_template_id) - return cls.get_driver(ct.server_type, ct.cluster_distro, ct.coe) - - def update_cluster_status(self, context, cluster): - '''Update the cluster status based on underlying orchestration - - This is an optional method if your implementation does not need - to poll the orchestration for status updates (for example, your - driver uses some notification-based mechanism instead). - ''' - return - - @abc.abstractproperty - def provides(self): - '''return a list of (server_type, os, coe) tuples - - Returns a list of cluster configurations supported by this driver - ''' - raise NotImplementedError("Subclasses must implement 'provides'.") - - @abc.abstractmethod - def create_cluster(self, context, cluster, cluster_create_timeout): - raise NotImplementedError("Subclasses must implement " - "'create_cluster'.") - - @abc.abstractmethod - def update_cluster(self, context, cluster, scale_manager=None, - rollback=False): - raise NotImplementedError("Subclasses must implement " - "'update_cluster'.") - - @abc.abstractmethod - def delete_cluster(self, context, cluster): - raise NotImplementedError("Subclasses must implement " - "'delete_cluster'.") - - def get_monitor(self, context, cluster): - """return the monitor with container data for this driver.""" - - return None - - def get_scale_manager(self, context, osclient, cluster): - """return the scale manager for this driver.""" - - return None - - def rotate_ca_certificate(self, context, cluster): - raise exception.NotSupported( - "'rotate_ca_certificate' is not supported by this driver.") diff --git a/magnum/drivers/common/image/fedora-atomic/README.rst b/magnum/drivers/common/image/fedora-atomic/README.rst deleted file mode 100644 index 48823b37..00000000 --- a/magnum/drivers/common/image/fedora-atomic/README.rst +++ /dev/null @@ -1,82 +0,0 @@ -============= -fedora-atomic -============= - -Generates a Fedora Atomic image based on a publicly deployed tree. This element has been tested under Debian, Ubuntu, CentOS and Fedora operating systems. - -Pre-requisites to run diskimage-builder --------------------------------------- -For diskimage-builder to work, the following packages need to be -present: - -* python-dev -* build-essential -* python-pip -* kpartx -* python-lzma -* qemu-utils -* yum -* yum-utils -* python-yaml -* curl - -For Debian/Ubuntu systems, use:: - - apt-get install python-dev build-essential python-pip kpartx python-lzma \ - qemu-utils yum yum-utils python-yaml git curl - -For CentOS and Fedora < 22, use:: - - yum install python-dev build-essential python-pip kpartx python-lzma qemu-utils yum yum-utils python-yaml curl - -For Fedora >= 22, use:: - - dnf install python-devel @development-tools python-pip kpartx python-backports-lzma @virtualization yum yum-utils python-yaml curl - -How to generate Fedora Atomic image ----------------------------------- -To generate an atomic image for Fedora 25, these commands can be -executed:: - - # Install diskimage-builder in virtual environment - virtualenv . - .
bin/activate - pip install diskimage-builder - git clone https://git.openstack.org/openstack/magnum - git clone https://git.openstack.org/openstack/dib-utils.git - - export PATH="${PWD}/dib-utils/bin:$PATH" - - export ELEMENTS_PATH=$(python -c 'import os, diskimage_builder, pkg_resources;print(os.path.abspath(pkg_resources.resource_filename(diskimage_builder.__name__, "elements")))') - export ELEMENTS_PATH="${ELEMENTS_PATH}:${PWD}/magnum/magnum/drivers/common/image" - - export DIB_RELEASE=25 # this can be switched to the desired version - export DIB_IMAGE_SIZE=2.5 # we need to give a bit more space to loopback device - - disk-image-create fedora-atomic -o fedora-atomic - -This element can consume already published trees, but you can use it -to consume your own generated trees. Documentation about creating your own trees -can be found at `http://developers.redhat.com/blog/2015/01/08/creating-custom-atomic-trees-images-and-installers-part-1/ `_ - -Environment Variables --------------------- - -To properly reference the tree, the following env vars can be set: - -FEDORA_ATOMIC_TREE_URL - :Required: Yes - :Description: URL for the public fedora-atomic tree to use. It can - reference your own published trees. - :Default: ``https://kojipkgs.fedoraproject.org/atomic/${DIB_RELEASE}/`` - - -FEDORA_ATOMIC_TREE_REF - :Required: Yes - :Description: Reference of the tree to install. - :Default: ``$(curl ${FEDORA_ATOMIC_TREE_URL}/refs/heads/fedora-atomic/${DIB_RELEASE}/x86_64/docker-host)`` - -You can use the defaults or export your url and reference, like the following:: - - export FEDORA_ATOMIC_TREE_URL="https://kojipkgs.fedoraproject.org/atomic/25/" - export FEDORA_ATOMIC_TREE_REF="$(curl https://kojipkgs.fedoraproject.org/atomic/25/refs/heads/fedora-atomic/25/x86_64/docker-host)" diff --git a/magnum/drivers/common/image/fedora-atomic/element-deps b/magnum/drivers/common/image/fedora-atomic/element-deps deleted file mode 100644 index 2e689538..00000000 --- a/magnum/drivers/common/image/fedora-atomic/element-deps +++ /dev/null @@ -1,4 +0,0 @@ -fedora-minimal -growroot -package-installs -vm diff --git a/magnum/drivers/common/image/fedora-atomic/environment.d/50-fedora-atomic b/magnum/drivers/common/image/fedora-atomic/environment.d/50-fedora-atomic deleted file mode 100644 index 5ed6f5fc..00000000 --- a/magnum/drivers/common/image/fedora-atomic/environment.d/50-fedora-atomic +++ /dev/null @@ -1,2 +0,0 @@ -export FEDORA_ATOMIC_TREE_URL=${FEDORA_ATOMIC_TREE_URL:-https://kojipkgs.fedoraproject.org/atomic/${DIB_RELEASE}/} -export FEDORA_ATOMIC_TREE_REF=${FEDORA_ATOMIC_TREE_REF:-$(curl ${FEDORA_ATOMIC_TREE_URL}/refs/heads/fedora-atomic/${DIB_RELEASE}/x86_64/docker-host)} diff --git a/magnum/drivers/common/image/fedora-atomic/finalise.d/80-fedora-atomic b/magnum/drivers/common/image/fedora-atomic/finalise.d/80-fedora-atomic deleted file mode 100755 index 6d70c42b..00000000 --- a/magnum/drivers/common/image/fedora-atomic/finalise.d/80-fedora-atomic +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash - -if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then - set -x -fi -set -eu -set -o pipefail - -# generate ostree in root -cd / -ostree admin os-init fedora-atomic -ostree remote add --set=gpg-verify=false fedora-atomic ${FEDORA_ATOMIC_TREE_URL} -ostree pull fedora-atomic ${FEDORA_ATOMIC_TREE_REF} -ostree remote delete fedora-atomic -ostree admin deploy --os=fedora-atomic ${FEDORA_ATOMIC_TREE_REF} --karg-proc-cmdline --karg=selinux=0 - -# copy /etc/fstab to the deployed directory
-SYSROOT=/ostree/deploy/fedora-atomic/deploy/${FEDORA_ATOMIC_TREE_REF}.0 -cp /etc/fstab $SYSROOT/etc/ - -# need to find the generated images -DEPLOYED_DIRECTORY=$(find /boot/ostree -name fedora-atomic-* -type d) -DEPLOYED_ID=${DEPLOYED_DIRECTORY##*-} -INIT_IMAGE=$(find ${DEPLOYED_DIRECTORY} -name initramfs*.img) -VMLINUZ_IMAGE=$(find ${DEPLOYED_DIRECTORY} -name vmlinuz*) - -# generate ostree boot -cat > /etc/grub.d/15_ostree < - This is a template resource that accepts public and private IPs from both - a Neutron LBaaS Pool and a master node. It connects the master inputs - to its outputs, essentially acting as one state of a multiplexer. - -parameters: - - pool_public_ip: - type: string - default: "" - - pool_private_ip: - type: string - default: "" - - master_public_ip: - type: string - default: "" - - master_private_ip: - type: string - default: "" - -outputs: - - public_ip: - value: {get_param: master_public_ip} - - private_ip: - value: {get_param: master_private_ip} diff --git a/magnum/drivers/common/templates/fragments/api_gateway_switcher_pool.yaml b/magnum/drivers/common/templates/fragments/api_gateway_switcher_pool.yaml deleted file mode 100644 index d2887874..00000000 --- a/magnum/drivers/common/templates/fragments/api_gateway_switcher_pool.yaml +++ /dev/null @@ -1,32 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a template resource that accepts public and private IPs from both - a Neutron LBaaS Pool and a master node. It connects the pool inputs - to its outputs, essentially acting as one state of a multiplexer. - -parameters: - - pool_public_ip: - type: string - default: "" - - pool_private_ip: - type: string - default: "" - - master_public_ip: - type: string - default: "" - - master_private_ip: - type: string - default: "" - -outputs: - - public_ip: - value: {get_param: pool_public_ip} - - private_ip: - value: {get_param: pool_private_ip} diff --git a/magnum/drivers/common/templates/fragments/configure-docker-registry.sh b/magnum/drivers/common/templates/fragments/configure-docker-registry.sh deleted file mode 100644 index f3cfcfa5..00000000 --- a/magnum/drivers/common/templates/fragments/configure-docker-registry.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/sh - -. /etc/sysconfig/heat-params - -if [ "$REGISTRY_ENABLED" = "False" ]; then - exit 0 -fi - -cat > /etc/sysconfig/registry-config.yml << EOF -version: 0.1 -log: - fields: - service: registry -storage: - cache: - layerinfo: inmemory - swift: - authurl: "$AUTH_URL" - region: "$SWIFT_REGION" - username: "$TRUSTEE_USERNAME" - password: "$TRUSTEE_PASSWORD" - domainid: "$TRUSTEE_DOMAIN_ID" - trustid: "$TRUST_ID" - container: "$REGISTRY_CONTAINER" - insecureskipverify: $REGISTRY_INSECURE - chunksize: $REGISTRY_CHUNKSIZE -http: - addr: :5000 -EOF - -cat > /etc/systemd/system/registry.service << EOF -[Unit] -Description=Docker registry v2 -Requires=docker.service -After=docker.service - -[Service] -Type=oneshot -RemainAfterExit=yes -ExecStart=/usr/bin/docker run -d -p $REGISTRY_PORT:5000 --restart=always --name registry -v /etc/sysconfig/registry-config.yml:/etc/docker/registry/config.yml registry:2 -ExecStop=/usr/bin/docker rm -f registry - -[Install] -WantedBy=multi-user.target -EOF diff --git a/magnum/drivers/common/templates/fragments/configure-docker-storage.sh b/magnum/drivers/common/templates/fragments/configure-docker-storage.sh deleted file mode 100644 index 4704e075..00000000 --- a/magnum/drivers/common/templates/fragments/configure-docker-storage.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/sh - -. 
/etc/sysconfig/heat-params - -if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then - if [ "$ENABLE_CINDER" == "False" ]; then - # FIXME(yuanying): Use ephemeral disk for docker storage - # Currently Ironic doesn't support cinder volumes, - # so we must use preserved ephemeral disk instead of a cinder volume. - device_path=$(readlink -f /dev/disk/by-label/ephemeral0) - else - attempts=60 - while [ ${attempts} -gt 0 ]; do - device_name=$(ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20}$) - if [ -n "${device_name}" ]; then - break - fi - echo "waiting for disk device" - sleep 0.5 - udevadm trigger - let attempts-- - done - - if [ -z "${device_name}" ]; then - echo "ERROR: disk device does not exist" >&2 - exit 1 - fi - - device_path=/dev/disk/by-id/${device_name} - fi -fi - -$configure_docker_storage_driver - -if [ "$DOCKER_STORAGE_DRIVER" = "overlay" ]; then - if [ $(echo -e "$(uname -r)\n3.18" | sort -V | head -1) = $(uname -r) ]; then - ERROR_MESSAGE="OverlayFS requires at least Linux kernel 3.18. Cluster node kernel version: $(uname -r)" - echo "ERROR: ${ERROR_MESSAGE}" >&2 - sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"${ERROR_MESSAGE}\"}'" - else - configure_overlay - fi -else - configure_devicemapper -fi diff --git a/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh b/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh deleted file mode 100644 index 9d92a666..00000000 --- a/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh +++ /dev/null @@ -1,57 +0,0 @@ -# This file contains docker storage drivers configuration for fedora -# atomic hosts. Currently, devicemapper and overlay are supported. - -# * Remove any existing docker-storage configuration. In case of an -# existing configuration, docker-storage-setup will fail. -# * Remove docker storage graph -clear_docker_storage () { - # stop docker - systemctl stop docker - # clear storage graph - rm -rf /var/lib/docker/* - # remove current LVs - docker-storage-setup --reset - - if [ -f /etc/sysconfig/docker-storage ]; then - sed -i "/^DOCKER_STORAGE_OPTIONS=/ s/=.*/=/" /etc/sysconfig/docker-storage - fi -} - -# Configure docker storage with xfs as backing filesystem. 
-configure_overlay () { - clear_docker_storage - - if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then - mkfs.xfs -f ${device_path} - echo "${device_path} /var/lib/docker xfs defaults 0 0" >> /etc/fstab - mount -a - fi - - echo "STORAGE_DRIVER=overlay" > /etc/sysconfig/docker-storage-setup - - docker-storage-setup - - local lvname=$(lvdisplay | grep "LV\ Path" | awk '{print $3}') - local pvname=$(pvdisplay | grep "PV\ Name" | awk '{print $3}') - lvextend -r $lvname $pvname -} - -# Configure docker storage with devicemapper using direct LVM -configure_devicemapper () { - clear_docker_storage - - echo "GROWROOT=True" > /etc/sysconfig/docker-storage-setup - echo "ROOT_SIZE=5GB" >> /etc/sysconfig/docker-storage-setup - - if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then - - pvcreate -f ${device_path} - vgcreate docker ${device_path} - - echo "VG=docker" >> /etc/sysconfig/docker-storage-setup - else - echo "DATA_SIZE=95%FREE" >> /etc/sysconfig/docker-storage-setup - fi - - docker-storage-setup -} diff --git a/magnum/drivers/common/templates/fragments/enable-docker-registry.sh b/magnum/drivers/common/templates/fragments/enable-docker-registry.sh deleted file mode 100644 index abc3c473..00000000 --- a/magnum/drivers/common/templates/fragments/enable-docker-registry.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh - -. /etc/sysconfig/heat-params - -if [ "$REGISTRY_ENABLED" = "False" ]; then - exit 0 -fi - -echo "starting docker registry ..." -systemctl daemon-reload -systemctl enable registry -systemctl --no-block start registry diff --git a/magnum/drivers/common/templates/fragments/floating_ip_address_switcher_private.yaml b/magnum/drivers/common/templates/fragments/floating_ip_address_switcher_private.yaml deleted file mode 100644 index 1b06731e..00000000 --- a/magnum/drivers/common/templates/fragments/floating_ip_address_switcher_private.yaml +++ /dev/null @@ -1,21 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a template resource that accepts public and private IPs. - It connects private ip address to its outputs, essentially acting as - one state of a multiplexer. - -parameters: - - public_ip: - type: string - default: "" - - private_ip: - type: string - default: "" - -outputs: - - ip_address: - value: {get_param: private_ip} diff --git a/magnum/drivers/common/templates/fragments/floating_ip_address_switcher_public.yaml b/magnum/drivers/common/templates/fragments/floating_ip_address_switcher_public.yaml deleted file mode 100644 index d4b266c0..00000000 --- a/magnum/drivers/common/templates/fragments/floating_ip_address_switcher_public.yaml +++ /dev/null @@ -1,21 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a template resource that accepts public and private IPs. - It connects public ip address to its outputs, essentially acting as - one state of a multiplexer. 
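# How one "state" of the multiplexer is selected (illustrative; the alias
# below is hypothetical, not defined by these templates): the Heat
# environment maps a single resource alias to exactly one switcher file,
# e.g.
#   resource_registry:
#     "Magnum::FloatingIPAddressSwitcher": fragments/floating_ip_address_switcher_public.yaml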
- -parameters: - - public_ip: - type: string - default: "" - - private_ip: - type: string - default: "" - -outputs: - - ip_address: - value: {get_param: public_ip} diff --git a/magnum/drivers/common/templates/fragments/network_switcher_existing.yaml b/magnum/drivers/common/templates/fragments/network_switcher_existing.yaml deleted file mode 100644 index e3f14fad..00000000 --- a/magnum/drivers/common/templates/fragments/network_switcher_existing.yaml +++ /dev/null @@ -1,27 +0,0 @@ -heat_template_version: 2014-10-16 - -parameters: - - private_network: - type: string - default: "" - - existing_network: - type: string - default: "" - - private_subnet: - type: string - default: "" - - existing_subnet: - type: string - default: "" - -outputs: - - network: - value: {get_param: existing_network} - - subnet: - value: {get_param: existing_subnet} diff --git a/magnum/drivers/common/templates/fragments/network_switcher_private.yaml b/magnum/drivers/common/templates/fragments/network_switcher_private.yaml deleted file mode 100644 index 107dd43d..00000000 --- a/magnum/drivers/common/templates/fragments/network_switcher_private.yaml +++ /dev/null @@ -1,27 +0,0 @@ -heat_template_version: 2014-10-16 - -parameters: - - private_network: - type: string - default: "" - - existing_network: - type: string - default: "" - - private_subnet: - type: string - default: "" - - existing_subnet: - type: string - default: "" - -outputs: - - network: - value: {get_param: private_network} - - subnet: - value: {get_param: private_subnet} diff --git a/magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh b/magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh deleted file mode 100644 index a5198e44..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/sh - -. /etc/sysconfig/heat-params - -DOCKER_HTTP_PROXY_CONF=/etc/systemd/system/docker.service.d/http_proxy.conf - -DOCKER_HTTPS_PROXY_CONF=/etc/systemd/system/docker.service.d/https_proxy.conf - -DOCKER_NO_PROXY_CONF=/etc/systemd/system/docker.service.d/no_proxy.conf - -DOCKER_RESTART=0 - -BASH_RC=/etc/bashrc - -mkdir -p /etc/systemd/system/docker.service.d - -if [ -n "$HTTP_PROXY" ]; then - cat < $DOCKER_HTTP_PROXY_CONF - [Service] - Environment=HTTP_PROXY=$HTTP_PROXY -EOF - - DOCKER_RESTART=1 - - if [ -f "$BASH_RC" ]; then - echo "declare -x http_proxy=$HTTP_PROXY" >> $BASH_RC - else - echo "File $BASH_RC does not exist, not setting http_proxy" - fi -fi - -if [ -n "$HTTPS_PROXY" ]; then - cat < $DOCKER_HTTPS_PROXY_CONF - [Service] - Environment=HTTPS_PROXY=$HTTPS_PROXY -EOF - - DOCKER_RESTART=1 - - if [ -f "$BASH_RC" ]; then - echo "declare -x https_proxy=$HTTPS_PROXY" >> $BASH_RC - else - echo "File $BASH_RC does not exist, not setting https_proxy" - fi -fi - -if [ -n "$NO_PROXY" ]; then - cat < $DOCKER_NO_PROXY_CONF - [Service] - Environment=NO_PROXY=$NO_PROXY -EOF - - DOCKER_RESTART=1 - - if [ -f "$BASH_RC" ]; then - echo "declare -x no_proxy=$NO_PROXY" >> $BASH_RC - else - echo "File $BASH_RC does not exist, not setting no_proxy" - fi -fi - -if [ "$DOCKER_RESTART" -eq 1 ]; then - systemctl daemon-reload - systemctl --no-block restart docker.service -fi diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh deleted file mode 100644 index 85422ac7..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/sh - -. 
/etc/sysconfig/heat-params - -if [ -n "$ETCD_VOLUME_SIZE" ] && [ "$ETCD_VOLUME_SIZE" -gt 0 ]; then - - attempts=60 - while [ ${attempts} -gt 0 ]; do - device_name=$(ls /dev/disk/by-id | grep ${ETCD_VOLUME:0:20}$) - if [ -n "${device_name}" ]; then - break - fi - echo "waiting for disk device" - sleep 0.5 - udevadm trigger - let attempts-- - done - - if [ -z "${device_name}" ]; then - echo "ERROR: disk device does not exist" >&2 - exit 1 - fi - - device_path=/dev/disk/by-id/${device_name} - fstype=$(blkid -s TYPE -o value ${device_path}) - if [ "${fstype}" != "xfs" ]; then - mkfs.xfs -f ${device_path} - fi - mkdir -p /var/lib/etcd - echo "${device_path} /var/lib/etcd xfs defaults 0 0" >> /etc/fstab - mount -a - chown -R etcd.etcd /var/lib/etcd - chmod 755 /var/lib/etcd - -fi - -if [ -z "$KUBE_NODE_IP" ]; then - # FIXME(yuanying): Set KUBE_NODE_IP correctly - KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) -fi - -myip="${KUBE_NODE_IP}" -cert_dir="/srv/kubernetes" -protocol="https" - -if [ "$TLS_DISABLED" = "True" ]; then - protocol="http" -fi - -cat > /etc/etcd/etcd.conf <> /etc/etcd/etcd.conf <> /etc/etcd/etcd.conf -fi diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh deleted file mode 100644 index cc06b2db..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/sh - -. /etc/sysconfig/heat-params - -echo "configuring kubernetes (master)" - -sed -i ' - /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/ -' /etc/kubernetes/config - -KUBE_API_ARGS="--runtime-config=api/all=true" -if [ "$TLS_DISABLED" == "True" ]; then - KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0 --insecure-port=$KUBE_API_PORT" -else - KUBE_API_ADDRESS="--bind-address=0.0.0.0 --secure-port=$KUBE_API_PORT" - # the insecure port is used internally - KUBE_API_ADDRESS="$KUBE_API_ADDRESS --insecure-port=8080" - KUBE_API_ARGS="$KUBE_API_ARGS --tls-cert-file=/srv/kubernetes/server.crt" - KUBE_API_ARGS="$KUBE_API_ARGS --tls-private-key-file=/srv/kubernetes/server.key" - KUBE_API_ARGS="$KUBE_API_ARGS --client-ca-file=/srv/kubernetes/ca.crt" - KUBE_API_ARGS="$KUBE_API_ARGS --kubelet-preferred-address-types=InternalIP,Hostname,ExternalIP" -fi - -KUBE_ADMISSION_CONTROL="" -if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then - KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL_LIST}" -fi - -if [ -n "$TRUST_ID" ]; then - KUBE_API_ARGS="$KUBE_API_ARGS --cloud-config=/etc/sysconfig/kube_openstack_config --cloud-provider=openstack" -fi - -sed -i ' - /^KUBE_API_ADDRESS=/ s/=.*/="'"${KUBE_API_ADDRESS}"'"/ - /^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"| - /^KUBE_API_ARGS=/ s|=.*|="'"${KUBE_API_ARGS}"'"| - /^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd-servers=http:\/\/127.0.0.1:2379"/ - /^KUBE_ADMISSION_CONTROL=/ s/=.*/="'"${KUBE_ADMISSION_CONTROL}"'"/ -' /etc/kubernetes/apiserver - - -# Add controller manager args -KUBE_CONTROLLER_MANAGER_ARGS="" -if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then - KUBE_CONTROLLER_MANAGER_ARGS="--service-account-private-key-file=/srv/kubernetes/server.key --root-ca-file=/srv/kubernetes/ca.crt" -fi - -if [ -n "$TRUST_ID" ]; then - KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cloud-config=/etc/sysconfig/kube_openstack_config
--cloud-provider=openstack" -fi - -sed -i ' - /^KUBELET_ADDRESSES=/ s/=.*/="--machines='""'"/ - /^KUBE_CONTROLLER_MANAGER_ARGS=/ s#\(KUBE_CONTROLLER_MANAGER_ARGS\).*#\1="'"${KUBE_CONTROLLER_MANAGER_ARGS}"'"# -' /etc/kubernetes/controller-manager - -HOSTNAME_OVERRIDE=$(hostname --short | sed 's/\.novalocal//') -KUBELET_ARGS="--register-node=true --register-schedulable=false --pod-manifest-path=/etc/kubernetes/manifests --hostname-override=${HOSTNAME_OVERRIDE}" -KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}" - -# For using default log-driver, other options should be ignored -sed -i 's/\-\-log\-driver\=journald//g' /etc/sysconfig/docker - -if [ -n "${INSECURE_REGISTRY_URL}" ]; then - KUBELET_ARGS="${KUBELET_ARGS} --pod-infra-container-image=${INSECURE_REGISTRY_URL}/google_containers/pause\:0.8.0" - echo "INSECURE_REGISTRY='--insecure-registry ${INSECURE_REGISTRY_URL}'" >> /etc/sysconfig/docker -fi - -# specified cgroup driver -KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=systemd" - -sed -i ' - /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/ - /^KUBELET_HOSTNAME=/ s/=.*/=""/ - /^KUBELET_ARGS=/ s|=.*|="'"$KUBELET_ARGS"'"| -' /etc/kubernetes/kubelet diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh deleted file mode 100644 index 77096c58..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/sh - -. /etc/sysconfig/heat-params - -echo "configuring kubernetes (minion)" - -CERT_DIR=/srv/kubernetes -PROTOCOL=https -FLANNEL_OPTIONS="-etcd-cafile $CERT_DIR/ca.crt \ --etcd-certfile $CERT_DIR/client.crt \ --etcd-keyfile $CERT_DIR/client.key" -ETCD_CURL_OPTIONS="--cacert $CERT_DIR/ca.crt \ ---cert $CERT_DIR/client.crt --key $CERT_DIR/client.key" -ETCD_SERVER_IP=${ETCD_SERVER_IP:-$KUBE_MASTER_IP} -KUBE_PROTOCOL="https" -KUBE_CONFIG="" -FLANNELD_CONFIG=/etc/sysconfig/flanneld - -if [ "$TLS_DISABLED" = "True" ]; then - PROTOCOL=http - FLANNEL_OPTIONS="" - ETCD_CURL_OPTIONS="" -fi - -sed -i '/FLANNEL_OPTIONS/'d $FLANNELD_CONFIG - -cat >> $FLANNELD_CONFIG <= 1.6) -if [ ! -f /usr/bin/udevadm ]; then - ln -s /sbin/udevadm /usr/bin/udevadm -fi - -# For using default log-driver, other options should be ignored -sed -i 's/\-\-log\-driver\=journald//g' /etc/sysconfig/docker - -if [ -n "${INSECURE_REGISTRY_URL}" ]; then - KUBELET_ARGS="${KUBELET_ARGS} --pod-infra-container-image=${INSECURE_REGISTRY_URL}/google_containers/pause\:0.8.0" - echo "INSECURE_REGISTRY='--insecure-registry ${INSECURE_REGISTRY_URL}'" >> /etc/sysconfig/docker -fi - -# specified cgroup driver -KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=systemd" - -sed -i ' - /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/ - /^KUBELET_HOSTNAME=/ s/=.*/=""/ - /^KUBELET_API_SERVER=/ s|=.*|="--api-servers='"$KUBE_MASTER_URI"'"| - /^KUBELET_ARGS=/ s|=.*|="'"${KUBELET_ARGS}"'"| -' /etc/kubernetes/kubelet - -sed -i ' - /^KUBE_PROXY_ARGS=/ s|=.*|='"$KUBE_CONFIG"'| -' /etc/kubernetes/proxy - -if [ "$NETWORK_DRIVER" = "flannel" ]; then - sed -i ' - /^FLANNEL_ETCD_ENDPOINTS=/ s|=.*|="'"$PROTOCOL"'://'"$ETCD_SERVER_IP"':2379"| - ' $FLANNELD_CONFIG - - # Make sure etcd has a flannel configuration - . 
$FLANNELD_CONFIG - until curl -sf $ETCD_CURL_OPTIONS \ - "$FLANNEL_ETCD_ENDPOINTS/v2/keys${FLANNEL_ETCD_PREFIX}/config?quorum=false&recursive=false&sorted=false" - do - echo "Waiting for flannel configuration in etcd..." - sleep 5 - done -fi - -cat >> /etc/environment < ${CORE_DNS} -apiVersion: v1 -kind: ConfigMap -metadata: - name: coredns - namespace: kube-system -data: - Corefile: | - .:53 { - errors - log stdout - health - kubernetes ${DNS_CLUSTER_DOMAIN} { - cidrs ${PORTAL_NETWORK_CIDR} - } - proxy . /etc/resolv.conf - cache 30 - } ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: coredns - namespace: kube-system - labels: - k8s-app: coredns - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: coredns - template: - metadata: - labels: - k8s-app: coredns - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - spec: - containers: - - name: coredns - image: coredns/coredns:007 - imagePullPolicy: Always - args: [ "-conf", "/etc/coredns/Corefile" ] - volumeMounts: - - name: config-volume - mountPath: /etc/coredns - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - livenessProbe: - httpGet: - path: /health - port: 8080 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - dnsPolicy: Default - volumes: - - name: config-volume - configMap: - name: coredns - items: - - key: Corefile - path: Corefile ---- -apiVersion: v1 -kind: Service -metadata: - name: kube-dns - namespace: kube-system - labels: - k8s-app: coredns - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" -spec: - selector: - k8s-app: coredns - clusterIP: ${DNS_SERVICE_IP} - ports: - - name: dns - port: 53 - protocol: UDP - - name: dns-tcp - port: 53 - protocol: TCP -EOF -} - -echo "Waiting for Kubernetes API..." -until curl --silent "http://127.0.0.1:8080/version" -do - sleep 5 -done - -kubectl create --validate=false -f $CORE_DNS diff --git a/magnum/drivers/common/templates/kubernetes/fragments/disable-selinux.sh b/magnum/drivers/common/templates/kubernetes/fragments/disable-selinux.sh deleted file mode 100644 index a28e26d6..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/disable-selinux.sh +++ /dev/null @@ -1,8 +0,0 @@ -#cloud-boothook -#!/bin/sh - -setenforce 0 - -sed -i ' - /^SELINUX=/ s/=.*/=permissive/ -' /etc/selinux/config diff --git a/magnum/drivers/common/templates/kubernetes/fragments/enable-kube-controller-manager-scheduler.sh b/magnum/drivers/common/templates/kubernetes/fragments/enable-kube-controller-manager-scheduler.sh deleted file mode 100644 index aca388fc..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/enable-kube-controller-manager-scheduler.sh +++ /dev/null @@ -1,137 +0,0 @@ -#!/bin/sh - -. 
/etc/sysconfig/heat-params
-
-if [ -n "${INSECURE_REGISTRY_URL}" ]; then
-    HYPERKUBE_IMAGE="${INSECURE_REGISTRY_URL}/google_containers/hyperkube:${KUBE_VERSION}"
-else
-    HYPERKUBE_IMAGE="gcr.io/google_containers/hyperkube:${KUBE_VERSION}"
-fi
-
-# vars also used by the Kubernetes config files
-unset KUBE_API_PORT
-unset KUBE_ALLOW_PRIV
-
-# this function generates a list of args (one per line) from a list of possibly nested args
-# the first parameter is the prefix to be added before each arg
-# empty args are ignored
-generate_pod_args() {
-    prefix=$1
-
-    for var in "${@:2}" ; do
-        for arg in "$var" ; do
-            echo "$prefix$arg"
-        done
-    done
-}
-
-
-init_templates () {
-    . /etc/kubernetes/config
-
-    . /etc/kubernetes/controller-manager
-
-    local TEMPLATE=/etc/kubernetes/manifests/kube-controller-manager.yaml
-    [ -f ${TEMPLATE} ] || {
-        echo "TEMPLATE: $TEMPLATE"
-        mkdir -p $(dirname ${TEMPLATE})
-        cat << EOF > ${TEMPLATE}
-apiVersion: v1
-kind: Pod
-metadata:
-  name: kube-controller-manager
-  namespace: kube-system
-spec:
-  hostNetwork: true
-  containers:
-  - name: kube-controller-manager
-    image: ${HYPERKUBE_IMAGE}
-    command:
-    - /hyperkube
-    - controller-manager
-    - --leader-elect=true
-$(generate_pod_args " - " $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_MASTER $KUBE_CONTROLLER_MANAGER_ARGS)
-    livenessProbe:
-      httpGet:
-        host: 127.0.0.1
-        path: /healthz
-        port: 10252
-      initialDelaySeconds: ${SYSTEM_PODS_INITIAL_DELAY}
-      timeoutSeconds: ${SYSTEM_PODS_TIMEOUT}
-    volumeMounts:
-    - mountPath: /etc/ssl/certs
-      name: ssl-certs-host
-      readOnly: true
-    - mountPath: /srv/kubernetes
-      name: kubernetes-config
-      readOnly: true
-    - mountPath: /etc/sysconfig
-      name: sysconfig
-      readOnly: true
-  volumes:
-  - hostPath:
-      path: /etc/ssl/certs
-    name: ssl-certs-host
-  - hostPath:
-      path: /srv/kubernetes
-    name: kubernetes-config
-  - hostPath:
-      path: /etc/sysconfig
-    name: sysconfig
-EOF
-    }
-
-    . /etc/kubernetes/scheduler
-
-    local TEMPLATE=/etc/kubernetes/manifests/kube-scheduler.yaml
-    [ -f ${TEMPLATE} ] || {
-        echo "TEMPLATE: $TEMPLATE"
-        mkdir -p $(dirname ${TEMPLATE})
-        cat << EOF > ${TEMPLATE}
-apiVersion: v1
-kind: Pod
-metadata:
-  name: kube-scheduler
-  namespace: kube-system
-spec:
-  hostNetwork: true
-  containers:
-  - name: kube-scheduler
-    image: ${HYPERKUBE_IMAGE}
-    command:
-    - /hyperkube
-    - scheduler
-    - --leader-elect=true
-$(generate_pod_args " - " $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_MASTER $KUBE_SCHEDULER_ARGS)
-    livenessProbe:
-      httpGet:
-        host: 127.0.0.1
-        path: /healthz
-        port: 10251
-      initialDelaySeconds: ${SYSTEM_PODS_INITIAL_DELAY}
-      timeoutSeconds: ${SYSTEM_PODS_TIMEOUT}
-    volumeMounts:
-    - mountPath: /etc/ssl/certs
-      name: ssl-certs-host
-      readOnly: true
-    - mountPath: /srv/kubernetes
-      name: kubernetes-config
-      readOnly: true
-    - mountPath: /etc/sysconfig
-      name: sysconfig
-      readOnly: true
-  volumes:
-  - hostPath:
-      path: /etc/ssl/certs
-    name: ssl-certs-host
-  - hostPath:
-      path: /srv/kubernetes
-    name: kubernetes-config
-  - hostPath:
-      path: /etc/sysconfig
-    name: sysconfig
-EOF
-    }
-}
-
-init_templates
diff --git a/magnum/drivers/common/templates/kubernetes/fragments/enable-kube-proxy-master.sh b/magnum/drivers/common/templates/kubernetes/fragments/enable-kube-proxy-master.sh
deleted file mode 100644
index cdd22394..00000000
--- a/magnum/drivers/common/templates/kubernetes/fragments/enable-kube-proxy-master.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh
-
-.
/etc/sysconfig/heat-params - -if [ -n "${INSECURE_REGISTRY_URL}" ]; then - HYPERKUBE_IMAGE="${INSECURE_REGISTRY_URL}/google_containers/hyperkube:${KUBE_VERSION}" -else - HYPERKUBE_IMAGE="gcr.io/google_containers/hyperkube:${KUBE_VERSION}" -fi - -init_templates () { - local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml - [ -f ${TEMPLATE} ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname ${TEMPLATE}) - cat << EOF > ${TEMPLATE} -apiVersion: v1 -kind: Pod -metadata: - name: kube-proxy - namespace: kube-system -spec: - hostNetwork: true - containers: - - name: kube-proxy - image: ${HYPERKUBE_IMAGE} - command: - - /hyperkube - - proxy - - --master=http://127.0.0.1:8080 - - --logtostderr=true - - --v=0 - securityContext: - privileged: true -EOF - } -} - -init_templates diff --git a/magnum/drivers/common/templates/kubernetes/fragments/enable-kube-proxy-minion.sh b/magnum/drivers/common/templates/kubernetes/fragments/enable-kube-proxy-minion.sh deleted file mode 100644 index 231c7ca2..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/enable-kube-proxy-minion.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/sh - -. /etc/sysconfig/heat-params - -if [ -n "${INSECURE_REGISTRY_URL}" ]; then - HYPERKUBE_IMAGE="${INSECURE_REGISTRY_URL}/google_containers/hyperkube:${KUBE_VERSION}" -else - HYPERKUBE_IMAGE="gcr.io/google_containers/hyperkube:${KUBE_VERSION}" -fi - -init_templates () { - local KUBE_PROTOCOL="https" - local KUBE_CONFIG="/srv/kubernetes/kubeconfig.yaml" - if [ "${TLS_DISABLED}" = "True" ]; then - KUBE_PROTOCOL="http" - KUBE_CONFIG= - fi - - local MASTER="${KUBE_PROTOCOL}://${KUBE_MASTER_IP}:${KUBE_API_PORT}" - local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml - [ -f ${TEMPLATE} ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname ${TEMPLATE}) - cat << EOF > ${TEMPLATE} -apiVersion: v1 -kind: Pod -metadata: - name: kube-proxy - namespace: kube-system -spec: - hostNetwork: true - containers: - - name: kube-proxy - image: ${HYPERKUBE_IMAGE} - command: - - /hyperkube - - proxy - - --master=${MASTER} - - --kubeconfig=${KUBE_CONFIG} - - --logtostderr=true - - --v=0 - securityContext: - privileged: true - volumeMounts: - - mountPath: /srv/kubernetes - name: "srv-kube" - readOnly: true - volumes: - - hostPath: - path: "/srv/kubernetes" - name: "srv-kube" -EOF - } -} - -init_templates diff --git a/magnum/drivers/common/templates/kubernetes/fragments/enable-monitoring.sh b/magnum/drivers/common/templates/kubernetes/fragments/enable-monitoring.sh deleted file mode 100644 index 282f7322..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/enable-monitoring.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/bin/bash - -. /etc/sysconfig/heat-params - -if [ "$(echo $PROMETHEUS_MONITORING | tr '[:upper:]' '[:lower:]')" = "false" ]; then - exit 0 -fi - -function writeFile { - # $1 is filename - # $2 is file content - - [ -f ${1} ] || { - echo "Writing File: $1" - mkdir -p $(dirname ${1}) - cat << EOF > ${1} -$2 -EOF - } -} - -KUBE_MON_BIN=/usr/local/bin/kube-enable-monitoring -KUBE_MON_SERVICE=/etc/systemd/system/kube-enable-monitoring.service -GRAFANA_DEF_DASHBOARDS="/var/lib/grafana/dashboards" -GRAFANA_DEF_DASHBOARD_FILE=$GRAFANA_DEF_DASHBOARDS"/default.json" - -# Write the binary for enable-monitoring -KUBE_MON_BIN_CONTENT='''#!/bin/sh -until curl -sf "http://127.0.0.1:8080/healthz" -do - echo "Waiting for Kubernetes API..." 
-    sleep 5
-done
-
-# Check if all resources exist already before creating them
-# Check if configmap Prometheus exists
-kubectl get configmap prometheus -n kube-system
-if [ "$?" != "0" ] && \
-    [ -f "/srv/kubernetes/monitoring/prometheusConfigMap.yaml" ]; then
-    kubectl create -f /srv/kubernetes/monitoring/prometheusConfigMap.yaml
-fi
-
-# Check if deployment and service Prometheus exist
-kubectl get service prometheus -n kube-system | kubectl get deployment prometheus -n kube-system
-if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
-    [ -f "/srv/kubernetes/monitoring/prometheusService.yaml" ]; then
-    kubectl create -f /srv/kubernetes/monitoring/prometheusService.yaml
-fi
-
-# Check if configmap graf-dash exists
-kubectl get configmap graf-dash -n kube-system
-if [ "$?" != "0" ] && \
-    [ -f '''$GRAFANA_DEF_DASHBOARD_FILE''' ]; then
-    kubectl create configmap graf-dash --from-file='''$GRAFANA_DEF_DASHBOARD_FILE''' -n kube-system
-fi
-
-# Check if deployment and service Grafana exist
-kubectl get service grafana -n kube-system | kubectl get deployment grafana -n kube-system
-if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
-    [ -f "/srv/kubernetes/monitoring/grafanaService.yaml" ]; then
-    kubectl create -f /srv/kubernetes/monitoring/grafanaService.yaml
-fi
-
-# Wait for the Grafana pod and then inject the data source
-while true
-do
-    echo "Waiting for the Grafana pod to be up and running"
-    if [ "$(kubectl get po -n kube-system -l name=grafana -o jsonpath={..phase})" = "Running" ]; then
-        break
-    fi
-    sleep 2
-done
-
-# Find which node is running Grafana
-NODE_IP=`kubectl get po -n kube-system -o jsonpath={.items[0].status.hostIP} -l name=grafana`
-PROM_SERVICE_IP=`kubectl get svc prometheus --namespace kube-system -o jsonpath={..clusterIP}`
-
-# The Grafana pod might be running but the app might still be initializing
-echo "Check if Grafana is ready..."
-curl --user admin:$ADMIN_PASSWD -X GET http://$NODE_IP:3000/api/datasources/1
-until [ $?
-eq 0 ] -do - sleep 2 - curl --user admin:$ADMIN_PASSWD -X GET http://$NODE_IP:3000/api/datasources/1 -done - -# Inject Prometheus datasource into Grafana -while true -do - INJECT=`curl --user admin:$ADMIN_PASSWD -X POST \ - -H "Content-Type: application/json;charset=UTF-8" \ - --data-binary '''"'"'''{"name":"k8sPrometheus","isDefault":true, - "type":"prometheus","url":"http://'''"'"'''$PROM_SERVICE_IP'''"'"''':9090","access":"proxy"}'''"'"'''\ - "http://$NODE_IP:3000/api/datasources/"` - - if [[ "$INJECT" = *"Datasource added"* ]]; then - echo "Prometheus datasource injected into Grafana" - break - fi - echo "Trying to inject Prometheus datasource into Grafana - "$INJECT -done -''' -writeFile $KUBE_MON_BIN "$KUBE_MON_BIN_CONTENT" - - -# Write the monitoring service -KUBE_MON_SERVICE_CONTENT='''[Unit] -Requires=kubelet.service - -[Service] -Type=oneshot -Environment=HOME=/root -EnvironmentFile=-/etc/kubernetes/config -ExecStart='''${KUBE_MON_BIN}''' - -[Install] -WantedBy=multi-user.target -''' -writeFile $KUBE_MON_SERVICE "$KUBE_MON_SERVICE_CONTENT" - -chown root:root ${KUBE_MON_BIN} -chmod 0755 ${KUBE_MON_BIN} - -chown root:root ${KUBE_MON_SERVICE} -chmod 0644 ${KUBE_MON_SERVICE} - -# Download the default JSON Grafana dashboard -# Not a crucial step, so allow it to fail -# TODO: this JSON should be passed into the minions as gzip in cloud-init -GRAFANA_DASHB_URL="https://grafana.net/api/dashboards/1621/revisions/1/download" -mkdir -p $GRAFANA_DEF_DASHBOARDS -curl $GRAFANA_DASHB_URL -o $GRAFANA_DEF_DASHBOARD_FILE || echo "Failed to fetch default Grafana dashboard" -if [ -f $GRAFANA_DEF_DASHBOARD_FILE ]; then - sed -i -- 's|${DS_PROMETHEUS}|k8sPrometheus|g' $GRAFANA_DEF_DASHBOARD_FILE -fi - -# Launch the monitoring service -systemctl enable kube-enable-monitoring -systemctl start --no-block kube-enable-monitoring diff --git a/magnum/drivers/common/templates/kubernetes/fragments/enable-node-exporter.sh b/magnum/drivers/common/templates/kubernetes/fragments/enable-node-exporter.sh deleted file mode 100644 index fbcdfd0c..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/enable-node-exporter.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh - -. 
/etc/sysconfig/heat-params
-
-if [ "$(echo $PROMETHEUS_MONITORING | tr '[:upper:]' '[:lower:]')" = "false" ]; then
-    exit 0
-fi
-
-# Write node-exporter manifest as a regular pod
-cat > /etc/kubernetes/manifests/node-exporter.yaml << EOF
-apiVersion: v1
-kind: Pod
-metadata:
-  name: node-exporter
-  namespace: kube-system
-  annotations:
-    prometheus.io/scrape: "true"
-  labels:
-    app: node-exporter
-spec:
-  containers:
-  - name: node-exporter
-    image: prom/node-exporter
-    ports:
-    - containerPort: 9100
-      hostPort: 9100
-EOF
diff --git a/magnum/drivers/common/templates/kubernetes/fragments/enable-services-master.sh b/magnum/drivers/common/templates/kubernetes/fragments/enable-services-master.sh
deleted file mode 100644
index cc13a02c..00000000
--- a/magnum/drivers/common/templates/kubernetes/fragments/enable-services-master.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh
-
-# make sure we pick up any modified unit files
-systemctl daemon-reload
-
-echo "starting services"
-for service in etcd docker kube-apiserver kubelet; do
-    echo "activating service $service"
-    systemctl enable $service
-    systemctl --no-block start $service
-done
diff --git a/magnum/drivers/common/templates/kubernetes/fragments/enable-services-minion.sh b/magnum/drivers/common/templates/kubernetes/fragments/enable-services-minion.sh
deleted file mode 100644
index 0253a373..00000000
--- a/magnum/drivers/common/templates/kubernetes/fragments/enable-services-minion.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-# docker is already enabled and possibly running on CentOS Atomic host,
-# so we need to stop it first and delete the docker0 bridge (which will
-# be re-created using the flannel-provided subnet).
-echo "stopping docker"
-systemctl stop docker
-ip link del docker0
-
-# make sure we pick up any modified unit files
-systemctl daemon-reload
-
-for service in docker kubelet; do
-    echo "activating service $service"
-    systemctl enable $service
-    systemctl --no-block start $service
-done
diff --git a/magnum/drivers/common/templates/kubernetes/fragments/kube-dashboard-service.sh b/magnum/drivers/common/templates/kubernetes/fragments/kube-dashboard-service.sh
deleted file mode 100644
index b357c771..00000000
--- a/magnum/drivers/common/templates/kubernetes/fragments/kube-dashboard-service.sh
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/bin/sh
-
-# this service is required because docker starts only after cloud-init has finished,
-# due to the service dependencies in Fedora Atomic (docker <- docker-storage-setup <- cloud-final)
-
-
-.
/etc/sysconfig/heat-params - -if [ "$(echo $KUBE_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "false" ]; then - exit 0 -fi - -if [ -n "${INSECURE_REGISTRY_URL}" ]; then - KUBE_DASH_IMAGE="${INSECURE_REGISTRY_URL}/google_containers/kubernetes-dashboard-amd64:${KUBE_DASHBOARD_VERSION}" -else - KUBE_DASH_IMAGE="gcr.io/google_containers/kubernetes-dashboard-amd64:${KUBE_DASHBOARD_VERSION}" -fi - -KUBE_DASH_DEPLOY=/srv/kubernetes/manifests/kube-dash-deploy.yaml - -[ -f ${KUBE_DASH_DEPLOY} ] || { - echo "Writing File: $KUBE_DASH_DEPLOY" - mkdir -p $(dirname ${KUBE_DASH_DEPLOY}) - cat << EOF > ${KUBE_DASH_DEPLOY} -kind: Deployment -apiVersion: extensions/v1beta1 -metadata: - labels: - app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - app: kubernetes-dashboard - template: - metadata: - labels: - app: kubernetes-dashboard - # Comment the following annotation if Dashboard must not be deployed on master - annotations: - scheduler.alpha.kubernetes.io/tolerations: | - [ - { - "key": "dedicated", - "operator": "Equal", - "value": "master", - "effect": "NoSchedule" - } - ] - spec: - containers: - - name: kubernetes-dashboard - image: ${KUBE_DASH_IMAGE} - imagePullPolicy: Always - ports: - - containerPort: 9090 - protocol: TCP - args: - livenessProbe: - httpGet: - path: / - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 -EOF -} - -KUBE_DASH_SVC=/srv/kubernetes/manifests/kube-dash-svc.yaml -[ -f ${KUBE_DASH_SVC} ] || { - echo "Writing File: $KUBE_DASH_SVC" - mkdir -p $(dirname ${KUBE_DASH_SVC}) - cat << EOF > ${KUBE_DASH_SVC} -kind: Service -apiVersion: v1 -metadata: - labels: - app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system -spec: - type: NodePort - ports: - - port: 80 - targetPort: 9090 - selector: - app: kubernetes-dashboard -EOF -} - -KUBE_DASH_BIN=/usr/local/bin/kube-dash -[ -f ${KUBE_DASH_BIN} ] || { - echo "Writing File: $KUBE_DASH_BIN" - mkdir -p $(dirname ${KUBE_DASH_BIN}) - cat << EOF > ${KUBE_DASH_BIN} -#!/bin/sh -until curl -sf "http://127.0.0.1:8080/healthz" -do - echo "Waiting for Kubernetes API..." - sleep 5 -done - -#echo check for existence of kubernetes-dashboard deployment -/usr/bin/kubectl get deployment kube-dashboard --namespace=kube-system - -if [ "\$?" != "0" ]; then - /usr/bin/kubectl create -f /srv/kubernetes/manifests/kube-dash-deploy.yaml --namespace=kube-system -fi - -#echo check for existence of kubernetes-dashboard service -/usr/bin/kubectl get service kubernetes-dashboard --namespace=kube-system - -if [ "\$?" 
!= "0" ]; then - /usr/bin/kubectl create -f /srv/kubernetes/manifests/kube-dash-svc.yaml --namespace=kube-system -fi -EOF -} - -KUBE_DASH_SERVICE=/etc/systemd/system/kube-dash.service -[ -f ${KUBE_DASH_SERVICE} ] || { - echo "Writing File: $KUBE_DASH_SERVICE" - mkdir -p $(dirname ${KUBE_DASH_SERVICE}) - cat << EOF > ${KUBE_DASH_SERVICE} -[Unit] -After=kube-system-namespace.service -Requires=kubelet.service -Wants=kube-system-namespace.service - -[Service] -Type=oneshot -Environment=HOME=/root -EnvironmentFile=-/etc/kubernetes/config -ExecStart=${KUBE_DASH_BIN} - -[Install] -WantedBy=multi-user.target -EOF -} - -chown root:root ${KUBE_DASH_BIN} -chmod 0755 ${KUBE_DASH_BIN} - -chown root:root ${KUBE_DASH_SERVICE} -chmod 0644 ${KUBE_DASH_SERVICE} - -systemctl enable kube-dash -systemctl start --no-block kube-dash diff --git a/magnum/drivers/common/templates/kubernetes/fragments/kube-system-namespace-service.sh b/magnum/drivers/common/templates/kubernetes/fragments/kube-system-namespace-service.sh deleted file mode 100644 index a530c3e3..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/kube-system-namespace-service.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/sh - -# this service required because docker will start only after cloud init was finished -# due service dependencies at Fedora Atomic (docker <- docker-storage-setup <- cloud-final) - -. /etc/sysconfig/heat-params - -KUBE_SYSTEM_JSON=/srv/kubernetes/kube-system-namespace.json -[ -f ${KUBE_SYSTEM_JSON} ] || { - echo "Writing File: $KUBE_SYSTEM_JSON" - mkdir -p $(dirname ${KUBE_SYSTEM_JSON}) - cat << EOF > ${KUBE_SYSTEM_JSON} -{ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": { - "name": "kube-system" - } -} -EOF -} - -KUBE_SYSTEM_BIN=/usr/local/bin/kube-system-namespace -[ -f ${KUBE_SYSTEM_BIN} ] || { - echo "Writing File: $KUBE_SYSTEM_BIN" - mkdir -p $(dirname ${KUBE_SYSTEM_BIN}) - cat << EOF > ${KUBE_SYSTEM_BIN} -#!/bin/sh -until curl -sf "http://127.0.0.1:8080/healthz" -do - echo "Waiting for Kubernetes API..." - sleep 5 -done - -#check for existence of namespace -/usr/bin/kubectl get namespace kube-system - -if [ "\$?" != "0" ]; then - /usr/bin/kubectl create -f /srv/kubernetes/kube-system-namespace.json -fi -EOF -} - -KUBE_SYSTEM_SERVICE=/etc/systemd/system/kube-system-namespace.service -[ -f ${KUBE_SYSTEM_SERVICE} ] || { - echo "Writing File: $KUBE_SYSTEM_SERVICE" - mkdir -p $(dirname ${KUBE_SYSTEM_SERVICE}) - cat << EOF > ${KUBE_SYSTEM_SERVICE} -[Unit] -After=kubelet.service -Requires=kubelet.service - -[Service] -Type=oneshot -Environment=HOME=/root -EnvironmentFile=-/etc/kubernetes/config -ExecStart=${KUBE_SYSTEM_BIN} - -[Install] -WantedBy=multi-user.target -EOF -} - -chown root:root ${KUBE_SYSTEM_BIN} -chmod 0755 ${KUBE_SYSTEM_BIN} - -chown root:root ${KUBE_SYSTEM_SERVICE} -chmod 0644 ${KUBE_SYSTEM_SERVICE} - -systemctl enable kube-system-namespace -systemctl start --no-block kube-system-namespace diff --git a/magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh b/magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh deleted file mode 100644 index a9f478b4..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/sh - -# Copyright 2014 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -. /etc/sysconfig/heat-params - -set -o errexit -set -o nounset -set -o pipefail - -if [ "$TLS_DISABLED" == "True" ]; then - exit 0 -fi - -cert_dir=/srv/kubernetes -cert_conf_dir=${cert_dir}/conf - -mkdir -p "$cert_dir" -mkdir -p "$cert_conf_dir" - -CA_CERT=$cert_dir/ca.crt -CLIENT_CERT=$cert_dir/client.crt -CLIENT_CSR=$cert_dir/client.csr -CLIENT_KEY=$cert_dir/client.key - -#Get a token by user credentials and trust -auth_json=$(cat << EOF -{ - "auth": { - "identity": { - "methods": [ - "password" - ], - "password": { - "user": { - "id": "$TRUSTEE_USER_ID", - "password": "$TRUSTEE_PASSWORD" - } - } - } - } -} -EOF -) - -content_type='Content-Type: application/json' -url="$AUTH_URL/auth/tokens" -USER_TOKEN=`curl -k -s -i -X POST -H "$content_type" -d "$auth_json" $url \ - | grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'` - -# Get CA certificate for this cluster -curl -k -X GET \ - -H "X-Auth-Token: $USER_TOKEN" \ - -H "OpenStack-API-Version: container-infra latest" \ - $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT - -# Create config for client's csr -cat > ${cert_conf_dir}/client.conf < ${CLIENT_CERT} - -# Common certs and key are created for both etcd and kubernetes services. -# Both etcd and kube user should have permission to access the certs and key. -groupadd kube_etcd -usermod -a -G kube_etcd etcd -usermod -a -G kube_etcd kube -chmod 550 "${cert_dir}" -chown -R kube:kube_etcd "${cert_dir}" -chmod 440 $CLIENT_KEY - -sed -i ' - s|CA_CERT|'"$CA_CERT"'| - s|CLIENT_CERT|'"$CLIENT_CERT"'| - s|CLIENT_KEY|'"$CLIENT_KEY"'| -' /srv/kubernetes/kubeconfig.yaml diff --git a/magnum/drivers/common/templates/kubernetes/fragments/make-cert.sh b/magnum/drivers/common/templates/kubernetes/fragments/make-cert.sh deleted file mode 100644 index ab375d08..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/make-cert.sh +++ /dev/null @@ -1,140 +0,0 @@ -#!/bin/sh - -# Copyright 2014 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -. 
/etc/sysconfig/heat-params - -set -o errexit -set -o nounset -set -o pipefail - -if [ "$TLS_DISABLED" == "True" ]; then - exit 0 -fi - -if [[ -z "${KUBE_NODE_PUBLIC_IP}" ]]; then - KUBE_NODE_PUBLIC_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) -fi -if [[ -z "${KUBE_NODE_IP}" ]]; then - KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) -fi - -sans="IP:${KUBE_NODE_PUBLIC_IP},IP:${KUBE_NODE_IP}" -if [ "${KUBE_NODE_PUBLIC_IP}" != "${KUBE_API_PUBLIC_ADDRESS}" ] \ - && [ -n "${KUBE_API_PUBLIC_ADDRESS}" ]; then - sans="${sans},IP:${KUBE_API_PUBLIC_ADDRESS}" -fi -if [ "${KUBE_NODE_IP}" != "${KUBE_API_PRIVATE_ADDRESS}" ] \ - && [ -n "${KUBE_API_PRIVATE_ADDRESS}" ]; then - sans="${sans},IP:${KUBE_API_PRIVATE_ADDRESS}" -fi -MASTER_HOSTNAME=${MASTER_HOSTNAME:-} -if [[ -n "${MASTER_HOSTNAME}" ]]; then - sans="${sans},DNS:${MASTER_HOSTNAME}" -fi - -if [[ -n "${ETCD_LB_VIP}" ]]; then - sans="${sans},IP:${ETCD_LB_VIP}" -fi - -sans="${sans},IP:127.0.0.1" - -KUBE_SERVICE_IP=$(echo $PORTAL_NETWORK_CIDR | awk 'BEGIN{FS="[./]"; OFS="."}{print $1,$2,$3,$4 + 1}') - -sans="${sans},IP:${KUBE_SERVICE_IP}" - -sans="${sans},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local" - -cert_dir=/srv/kubernetes -cert_conf_dir=${cert_dir}/conf - -mkdir -p "$cert_dir" -mkdir -p "$cert_conf_dir" - -CA_CERT=$cert_dir/ca.crt -SERVER_CERT=$cert_dir/server.crt -SERVER_CSR=$cert_dir/server.csr -SERVER_KEY=$cert_dir/server.key - -#Get a token by user credentials and trust -auth_json=$(cat << EOF -{ - "auth": { - "identity": { - "methods": [ - "password" - ], - "password": { - "user": { - "id": "$TRUSTEE_USER_ID", - "password": "$TRUSTEE_PASSWORD" - } - } - } - } -} -EOF -) - -content_type='Content-Type: application/json' -url="$AUTH_URL/auth/tokens" -USER_TOKEN=`curl -k -s -i -X POST -H "$content_type" -d "$auth_json" $url \ - | grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'` - -# Get CA certificate for this cluster -curl -k -X GET \ - -H "X-Auth-Token: $USER_TOKEN" \ - -H "OpenStack-API-Version: container-infra latest" \ - $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT} - -# Create config for server's csr -cat > ${cert_conf_dir}/server.conf < ${SERVER_CERT} - -# Common certs and key are created for both etcd and kubernetes services. -# Both etcd and kube user should have permission to access the certs and key. -groupadd kube_etcd -usermod -a -G kube_etcd etcd -usermod -a -G kube_etcd kube -chmod 550 "${cert_dir}" -chown -R kube:kube_etcd "${cert_dir}" -chmod 440 $SERVER_KEY diff --git a/magnum/drivers/common/templates/kubernetes/fragments/network-config-service.sh b/magnum/drivers/common/templates/kubernetes/fragments/network-config-service.sh deleted file mode 100644 index 643179cf..00000000 --- a/magnum/drivers/common/templates/kubernetes/fragments/network-config-service.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/sh - -. 
/etc/sysconfig/heat-params
-
-if [ "$NETWORK_DRIVER" != "flannel" ]; then
-    exit 0
-fi
-CERT_DIR=/srv/kubernetes
-PROTOCOL=https
-FLANNEL_OPTIONS="-etcd-cafile $CERT_DIR/ca.crt \
--etcd-certfile $CERT_DIR/server.crt \
--etcd-keyfile $CERT_DIR/server.key"
-ETCD_CURL_OPTIONS="--cacert $CERT_DIR/ca.crt \
---cert $CERT_DIR/server.crt --key $CERT_DIR/server.key"
-FLANNELD_CONFIG=/etc/sysconfig/flanneld
-
-if [ "$TLS_DISABLED" = "True" ]; then
-    PROTOCOL=http
-    FLANNEL_OPTIONS=""
-    ETCD_CURL_OPTIONS=""
-fi
-
-sed -i '/FLANNEL_OPTIONS/'d $FLANNELD_CONFIG
-
-cat >> $FLANNELD_CONFIG < $FLANNEL_CONFIG_BIN <&2
-    exit 1
-fi
-
-if [ -z "$FLANNEL_ETCD_ENDPOINTS" ] || [ -z "$FLANNEL_ETCD_PREFIX" ]; then
-    echo "ERROR: missing required configuration" >&2
-    exit 1
-fi
-
-echo "creating flanneld config in etcd"
-while ! curl -sf -L $ETCD_CURL_OPTIONS \
-    $FLANNEL_ETCD_ENDPOINTS/v2/keys${FLANNEL_ETCD_PREFIX}/config \
-    -X PUT --data-urlencode value@${FLANNEL_JSON}; do
-    echo "waiting for etcd"
-    sleep 1
-done
-EOF
-
-cat > $FLANNEL_CONFIG_SERVICE <> $FLANNEL_DOCKER_BRIDGE_BIN <&2
-    exit 1
-fi
-
-mkdir -p /run/flannel/
-cat > /run/flannel/docker <> $FLANNEL_DOCKER_BRIDGE_SERVICE <> $DOCKER_FLANNEL_CONF <> $FLANNEL_DOCKER_BRIDGE_CONF < $WC_NOTIFY_BIN < $WC_NOTIFY_SERVICE < $KUBE_OS_CLOUD_CONFIG < $FLANNEL_JSON <
-    Creates network resources for the cluster: allocates a network and
-    a router for our servers.
-
-parameters:
-  existing_network:
-    type: string
-    default: ""
-
-  existing_subnet:
-    type: string
-    default: ""
-
-  private_network_cidr:
-    type: string
-    description: network range for fixed ip network
-
-  private_network_name:
-    type: string
-    description: fixed network name
-    default: ""
-
-  dns_nameserver:
-    type: string
-    description: address of a dns nameserver reachable in your environment
-
-  external_network:
-    type: string
-    description: uuid/name of a network to use for floating ip addresses
-
-resources:
-  private_network:
-    type: Magnum::Optional::Neutron::Net
-    properties:
-      name: {get_param: private_network_name}
-
-  private_subnet:
-    type: Magnum::Optional::Neutron::Subnet
-    properties:
-      cidr: {get_param: private_network_cidr}
-      network: {get_resource: private_network}
-      dns_nameservers:
-        - {get_param: dns_nameserver}
-
-  extrouter:
-    type: Magnum::Optional::Neutron::Router
-    properties:
-      external_gateway_info:
-        network: {get_param: external_network}
-
-  extrouter_inside:
-    type: Magnum::Optional::Neutron::RouterInterface
-    properties:
-      router_id: {get_resource: extrouter}
-      subnet: {get_resource: private_subnet}
-
-  network_switch:
-    type: Magnum::NetworkSwitcher
-    properties:
-      private_network: {get_resource: private_network}
-      private_subnet: {get_resource: private_subnet}
-      existing_network: {get_param: existing_network}
-      existing_subnet: {get_param: existing_subnet}
-
-outputs:
-  fixed_network:
-    description: >
-      ID of the network on which machines are provisioned
-    value: {get_attr: [network_switch, network]}
-  fixed_subnet:
-    description: >
-      ID of the subnet on which machines are provisioned
-    value: {get_attr: [network_switch, subnet]}
diff --git a/magnum/drivers/common/templates/swarm/fragments/add-docker-daemon-options.sh b/magnum/drivers/common/templates/swarm/fragments/add-docker-daemon-options.sh
deleted file mode 100644
index b08eb313..00000000
--- a/magnum/drivers/common/templates/swarm/fragments/add-docker-daemon-options.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-.
/etc/sysconfig/heat-params - -opts="-H fd:// -H tcp://0.0.0.0:2375 " - -if [ "$TLS_DISABLED" = 'False' ]; then - opts=$opts"--tlsverify --tlscacert=/etc/docker/ca.crt " - opts=$opts"--tlskey=/etc/docker/server.key " - opts=$opts"--tlscert=/etc/docker/server.crt " -fi - -sed -i '/^OPTIONS=/ s#\(OPTIONS='"'"'\)#\1'"$opts"'#' /etc/sysconfig/docker diff --git a/magnum/drivers/common/templates/swarm/fragments/add-proxy.sh b/magnum/drivers/common/templates/swarm/fragments/add-proxy.sh deleted file mode 100644 index 1005bd9f..00000000 --- a/magnum/drivers/common/templates/swarm/fragments/add-proxy.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/sh - -. /etc/sysconfig/heat-params - -DOCKER_HTTP_PROXY_CONF=/etc/systemd/system/docker.service.d/http_proxy.conf - -DOCKER_HTTPS_PROXY_CONF=/etc/systemd/system/docker.service.d/https_proxy.conf - -DOCKER_NO_PROXY_CONF=/etc/systemd/system/docker.service.d/no_proxy.conf - -DOCKER_RESTART=0 - -BASH_RC=/etc/bashrc - -mkdir -p /etc/systemd/system/docker.service.d - -if [ -n "$HTTP_PROXY" ]; then - cat < $DOCKER_HTTP_PROXY_CONF - [Service] - Environment=HTTP_PROXY=$HTTP_PROXY -EOF - - DOCKER_RESTART=1 - - if [ -f "$BASH_RC" ]; then - echo "declare -x http_proxy=$HTTP_PROXY" >> $BASH_RC - else - echo "File $BASH_RC does not exist, not setting http_proxy" - fi -fi - -if [ -n "$HTTPS_PROXY" ]; then - cat < $DOCKER_HTTPS_PROXY_CONF - [Service] - Environment=HTTPS_PROXY=$HTTPS_PROXY -EOF - - DOCKER_RESTART=1 - - if [ -f "$BASH_RC" ]; then - echo "declare -x https_proxy=$HTTPS_PROXY" >> $BASH_RC - else - echo "File $BASH_RC does not exist, not setting https_proxy" - fi -fi - -if [ -n "$NO_PROXY" ]; then - cat < $DOCKER_NO_PROXY_CONF - [Service] - Environment=NO_PROXY=$NO_PROXY -EOF - - DOCKER_RESTART=1 - - if [ -f "$BASH_RC" ]; then - echo "declare -x no_proxy=$NO_PROXY" >> $BASH_RC - else - echo "File $BASH_RC does not exist, not setting no_proxy" - fi -else - cat < $DOCKER_NO_PROXY_CONF - [Service] - Environment=NO_PROXY=$SWARM_API_IP,$ETCD_SERVER_IP,$SWARM_NODE_IP -EOF - - DOCKER_RESTART=1 - - if [ -f "$BASH_RC" ]; then - echo "declare -x no_proxy=$SWARM_API_IP,$ETCD_SERVER_IP,$SWARM_NODE_IP" >> $BASH_RC - else - echo "File $BASH_RC does not exist, not setting no_proxy" - fi -fi - -if [ "$DOCKER_RESTART" -eq 1 ]; then - systemctl daemon-reload - systemctl --no-block restart docker.service -fi diff --git a/magnum/drivers/common/templates/swarm/fragments/cfn-signal.sh b/magnum/drivers/common/templates/swarm/fragments/cfn-signal.sh deleted file mode 100644 index ebe92688..00000000 --- a/magnum/drivers/common/templates/swarm/fragments/cfn-signal.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh - -. /etc/sysconfig/heat-params - -echo "notifying heat" - -STATUS="SUCCESS" -REASON="Setup complete" -DATA="OK" -UUID=`uuidgen` - -data=$(echo '{"status": "'${STATUS}'", "reason": "'$REASON'", "data": "'${DATA}'", "id": "'$UUID'"}') - -sh -c "${WAIT_CURL} --data-binary '${data}'" diff --git a/magnum/drivers/common/templates/swarm/fragments/configure-etcd.sh b/magnum/drivers/common/templates/swarm/fragments/configure-etcd.sh deleted file mode 100644 index a8f6bc29..00000000 --- a/magnum/drivers/common/templates/swarm/fragments/configure-etcd.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh - -. 
/etc/sysconfig/heat-params - -myip="$SWARM_NODE_IP" -cert_dir="/etc/docker" -protocol="https" - -if [ "$TLS_DISABLED" = "True" ]; then - protocol="http" -fi - -cat > /etc/etcd/etcd.conf <> /etc/etcd/etcd.conf <> /etc/etcd/etcd.conf -fi diff --git a/magnum/drivers/common/templates/swarm/fragments/configure-selinux.sh b/magnum/drivers/common/templates/swarm/fragments/configure-selinux.sh deleted file mode 100644 index 1d3c0205..00000000 --- a/magnum/drivers/common/templates/swarm/fragments/configure-selinux.sh +++ /dev/null @@ -1,12 +0,0 @@ -#cloud-boothook -#!/bin/sh - -# files in /usr/local/bin should be labeled bin_t -# however on Atomic /usr/local is a symlink to /var/usrlocal -# so the default Fedora policy doesn't work -echo '/var/usrlocal/(.*/)?bin(/.*)? system_u:object_r:bin_t:s0' > /etc/selinux/targeted/contexts/files/file_contexts.local -restorecon -R /usr/local/bin - -# disable selinux until cloud-init is over -# enabled again in enable-services.sh -setenforce 0 diff --git a/magnum/drivers/common/templates/swarm/fragments/enable-services.sh b/magnum/drivers/common/templates/swarm/fragments/enable-services.sh deleted file mode 100644 index b01917a2..00000000 --- a/magnum/drivers/common/templates/swarm/fragments/enable-services.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -set -x - -systemctl stop docker - -echo "starting services" -systemctl daemon-reload -for service in $NODE_SERVICES; do - echo "activating service $service" - systemctl enable $service - systemctl --no-block start $service -done - -setenforce 1 diff --git a/magnum/drivers/common/templates/swarm/fragments/make-cert.py b/magnum/drivers/common/templates/swarm/fragments/make-cert.py deleted file mode 100644 index 844b035f..00000000 --- a/magnum/drivers/common/templates/swarm/fragments/make-cert.py +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/python - -# Copyright 2015 Rackspace, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
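-
-# Overall flow of this fragment: load /etc/sysconfig/heat-params into a dict,
-# fetch the cluster CA certificate from the Magnum API, generate a 4096-bit
-# RSA server key and a CSR with openssl, then POST the CSR back to Magnum and
-# write out the signed server certificate for the swarm services.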
- -import json -import os -import subprocess -import sys - -import requests - -HEAT_PARAMS_PATH = '/etc/sysconfig/heat-params' -PUBLIC_IP_URL = 'http://169.254.169.254/latest/meta-data/public-ipv4' -CERT_DIR = '/etc/docker' -CERT_CONF_DIR = '%s/conf' % CERT_DIR -CA_CERT_PATH = '%s/ca.crt' % CERT_DIR -SERVER_CONF_PATH = '%s/server.conf' % CERT_CONF_DIR -SERVER_KEY_PATH = '%s/server.key' % CERT_DIR -SERVER_CSR_PATH = '%s/server.csr' % CERT_DIR -SERVER_CERT_PATH = '%s/server.crt' % CERT_DIR - -CSR_CONFIG_TEMPLATE = """ -[req] -distinguished_name = req_distinguished_name -req_extensions = req_ext -x509_extensions = req_ext -prompt = no -copy_extensions = copyall -[req_distinguished_name] -CN = swarm.invalid -[req_ext] -subjectAltName = %(subject_alt_names)s -extendedKeyUsage = clientAuth,serverAuth -""" - - -def _parse_config_value(value): - parsed_value = value - if parsed_value[-1] == '\n': - parsed_value = parsed_value[:-1] - return parsed_value[1:-1] - - -def load_config(): - config = dict() - with open(HEAT_PARAMS_PATH, 'r') as fp: - for line in fp.readlines(): - key, value = line.split('=', 1) - config[key] = _parse_config_value(value) - return config - - -def create_dirs(): - os.makedirs(CERT_CONF_DIR) - - -def _get_public_ip(): - return requests.get(PUBLIC_IP_URL).text - - -def _build_subject_alt_names(config): - subject_alt_names = [ - 'IP:%s' % _get_public_ip(), - 'IP:%s' % config['API_IP_ADDRESS'], - 'IP:%s' % config['SWARM_NODE_IP'], - 'IP:%s' % config['SWARM_API_IP'], - 'IP:127.0.0.1' - ] - return ','.join(subject_alt_names) - - -def write_ca_cert(config): - cluster_cert_url = '%s/certificates/%s' % (config['MAGNUM_URL'], - config['CLUSTER_UUID']) - headers = {'X-Auth-Token': config['USER_TOKEN'], - 'OpenStack-API-Version': 'container-infra latest'} - ca_cert_resp = requests.get(cluster_cert_url, - headers=headers) - - with open(CA_CERT_PATH, 'w') as fp: - fp.write(ca_cert_resp.json()['pem']) - - -def write_server_key(): - subprocess.call(['openssl', 'genrsa', - '-out', SERVER_KEY_PATH, - '4096']) - - -def _write_csr_config(config): - with open(SERVER_CONF_PATH, 'w') as fp: - params = { - 'subject_alt_names': _build_subject_alt_names(config) - } - fp.write(CSR_CONFIG_TEMPLATE % params) - - -def create_server_csr(config): - _write_csr_config(config) - subprocess.call(['openssl', 'req', '-new', - '-days', '1000', - '-key', SERVER_KEY_PATH, - '-out', SERVER_CSR_PATH, - '-reqexts', 'req_ext', - '-extensions', 'req_ext', - '-config', SERVER_CONF_PATH]) - - with open(SERVER_CSR_PATH, 'r') as fp: - return {'cluster_uuid': config['CLUSTER_UUID'], 'csr': fp.read()} - - -def write_server_cert(config, csr_req): - cert_url = '%s/certificates' % config['MAGNUM_URL'] - headers = { - 'Content-Type': 'application/json', - 'X-Auth-Token': config['USER_TOKEN'], - 'OpenStack-API-Version': 'container-infra latest' - } - csr_resp = requests.post(cert_url, - data=json.dumps(csr_req), - headers=headers) - - with open(SERVER_CERT_PATH, 'w') as fp: - fp.write(csr_resp.json()['pem']) - - -def get_user_token(config): - creds_str = ''' -{ - "auth": { - "identity": { - "methods": [ - "password" - ], - "password": { - "user": { - "id": "%(trustee_user_id)s", - "password": "%(trustee_password)s" - } - } - } - } -} -''' - params = { - 'trustee_user_id': config['TRUSTEE_USER_ID'], - 'trustee_password': config['TRUSTEE_PASSWORD'], - } - creds = creds_str % params - headers = {'Content-Type': 'application/json'} - url = config['AUTH_URL'] + '/auth/tokens' - r = requests.post(url, headers=headers, data=creds) - 
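-    # Keystone v3 returns the issued token in the X-Subject-Token response
-    # header rather than in the JSON body, so read it from r.headers.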
-    config['USER_TOKEN'] = r.headers['X-Subject-Token']
-    return config
-
-
-def main():
-    config = load_config()
-    if config['TLS_DISABLED'] == 'False':
-        create_dirs()
-        config = get_user_token(config)
-        write_ca_cert(config)
-        write_server_key()
-        csr_req = create_server_csr(config)
-        write_server_cert(config, csr_req)
-
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/magnum/drivers/common/templates/swarm/fragments/network-config-service.sh b/magnum/drivers/common/templates/swarm/fragments/network-config-service.sh
deleted file mode 100644
index d6a1b6cd..00000000
--- a/magnum/drivers/common/templates/swarm/fragments/network-config-service.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/sh
-
-. /etc/sysconfig/heat-params
-
-echo "Configuring ${NETWORK_DRIVER} network ..."
-
-if [ "$NETWORK_DRIVER" != "flannel" ]; then
-    exit 0
-fi
-
-FLANNELD_CONFIG=/etc/sysconfig/flanneld
-FLANNEL_CONFIG_BIN=/usr/local/bin/flannel-config
-FLANNEL_CONFIG_SERVICE=/etc/systemd/system/flannel-config.service
-FLANNEL_JSON=/etc/sysconfig/flannel-network.json
-CERT_DIR=/etc/docker
-PROTOCOL=https
-FLANNEL_OPTIONS="-etcd-cafile $CERT_DIR/ca.crt \
--etcd-certfile $CERT_DIR/server.crt \
--etcd-keyfile $CERT_DIR/server.key"
-ETCD_CURL_OPTIONS="--cacert $CERT_DIR/ca.crt \
---cert $CERT_DIR/server.crt --key $CERT_DIR/server.key"
-
-if [ "$TLS_DISABLED" = "True" ]; then
-    PROTOCOL=http
-    FLANNEL_OPTIONS=""
-    ETCD_CURL_OPTIONS=""
-fi
-
-sed -i '
-    /^FLANNEL_ETCD=/ s|=.*|="'"$PROTOCOL"'://'"$ETCD_SERVER_IP"':2379"|
-' $FLANNELD_CONFIG
-
-sed -i '/FLANNEL_OPTIONS/'d $FLANNELD_CONFIG
-
-cat >> $FLANNELD_CONFIG < $FLANNEL_CONFIG_BIN <&2
-    exit 1
-fi
-
-if [ -z "$FLANNEL_ETCD" ] || [ -z "$FLANNEL_ETCD_KEY" ]; then
-    echo "ERROR: missing required configuration" >&2
-    exit 1
-fi
-
-echo "creating flanneld config in etcd"
-while !
curl -sf -L $ETCD_CURL_OPTIONS \ - $FLANNEL_ETCD/v2/keys${FLANNEL_ETCD_KEY}/config \ - -X PUT --data-urlencode value@${FLANNEL_JSON}; do - echo "waiting for etcd" - sleep 1 -done -EOF - -cat > $FLANNEL_CONFIG_SERVICE <> $FLANNELD_CONFIG <> $FLANNEL_DOCKER_BRIDGE_BIN <&2 - exit 1 -fi - -mkdir -p /run/flannel/ -cat > /run/flannel/docker <> $FLANNEL_DOCKER_BRIDGE_SERVICE <> $DOCKER_FLANNEL_CONF <> $FLANNEL_DOCKER_BRIDGE_CONF < $REXRAY_CONFIG < /etc/systemd/system/rexray.service < $FLANNEL_JSON < $CONF_FILE << EOF -[Unit] -Description=Swarm Agent -After=docker.service -Requires=docker.service -OnFailure=swarm-agent-failure.service - -[Service] -TimeoutStartSec=0 -ExecStartPre=-/usr/bin/docker kill swarm-agent -ExecStartPre=-/usr/bin/docker rm swarm-agent -ExecStartPre=-/usr/bin/docker pull swarm:$SWARM_VERSION -ExecStart=/usr/bin/docker run -e http_proxy=$HTTP_PROXY \\ - -e https_proxy=$HTTPS_PROXY \\ - -e no_proxy=$NO_PROXY \\ - -v $CERT_DIR:$CERT_DIR:Z \\ - --name swarm-agent \\ - swarm:$SWARM_VERSION \\ - join \\ - --addr $myip:2375 \\ -EOF - -if [ $TLS_DISABLED = 'False' ]; then - -cat >> /etc/systemd/system/swarm-agent.service << END_TLS - --discovery-opt kv.cacertfile=$CERT_DIR/ca.crt \\ - --discovery-opt kv.certfile=$CERT_DIR/server.crt \\ - --discovery-opt kv.keyfile=$CERT_DIR/server.key \\ -END_TLS - -fi - -cat >> /etc/systemd/system/swarm-agent.service << END_SERVICE_BOTTOM - etcd://$ETCD_SERVER_IP:2379/v2/keys/swarm/ -Restart=always -ExecStop=/usr/bin/docker stop swarm-agent -ExecStartPost=/usr/local/bin/notify-heat - -[Install] -WantedBy=multi-user.target -END_SERVICE_BOTTOM - -chown root:root $CONF_FILE -chmod 644 $CONF_FILE - -SCRIPT=/usr/local/bin/notify-heat - -UUID=`uuidgen` -cat > $SCRIPT << EOF -#!/bin/sh -until etcdctl \ - --peers $PROTOCOL://$ETCD_SERVER_IP:2379 \ - $ETCDCTL_OPTIONS --timeout 1s \ - --total-timeout 5s \ - ls /v2/keys/swarm/docker/swarm/nodes/$myip:2375 -do - echo "Waiting for swarm agent registration..." 
- sleep 5 -done - -${WAIT_CURL} \ - --data-binary '{"status": "SUCCESS", "reason": "Swarm agent ready", "data": "OK", "id": "${UUID}"}' -EOF - -chown root:root $SCRIPT -chmod 755 $SCRIPT diff --git a/magnum/drivers/common/templates/swarm/fragments/write-swarm-master-service.sh b/magnum/drivers/common/templates/swarm/fragments/write-swarm-master-service.sh deleted file mode 100644 index 8f4187ac..00000000 --- a/magnum/drivers/common/templates/swarm/fragments/write-swarm-master-service.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/sh - -CERT_DIR=/etc/docker - -if [ -z "$NO_PROXY" ]; then - NO_PROXY=$SWARM_API_IP,$ETCD_SERVER_IP,$SWARM_NODE_IP -fi - -cat > /etc/systemd/system/swarm-manager.service << END_SERVICE_TOP -[Unit] -Description=Swarm Manager -After=docker.service etcd.service -Requires=docker.service etcd.service -OnFailure=swarm-manager-failure.service - -[Service] -TimeoutStartSec=0 -ExecStartPre=-/usr/bin/docker kill swarm-manager -ExecStartPre=-/usr/bin/docker rm swarm-manager -ExecStartPre=-/usr/bin/docker pull swarm:$SWARM_VERSION -ExecStart=/usr/bin/docker run --name swarm-manager \\ - -v $CERT_DIR:$CERT_DIR:Z \\ - -p 2376:2375 \\ - -e http_proxy=$HTTP_PROXY \\ - -e https_proxy=$HTTPS_PROXY \\ - -e no_proxy=$NO_PROXY \\ - swarm:$SWARM_VERSION \\ - manage -H tcp://0.0.0.0:2375 \\ - --strategy $SWARM_STRATEGY \\ - --replication \\ - --advertise $NODE_IP:2376 \\ -END_SERVICE_TOP - -if [ $TLS_DISABLED = 'False' ]; then - -cat >> /etc/systemd/system/swarm-manager.service << END_TLS - --tlsverify \\ - --tlscacert=$CERT_DIR/ca.crt \\ - --tlskey=$CERT_DIR/server.key \\ - --tlscert=$CERT_DIR/server.crt \\ - --discovery-opt kv.cacertfile=$CERT_DIR/ca.crt \\ - --discovery-opt kv.certfile=$CERT_DIR/server.crt \\ - --discovery-opt kv.keyfile=$CERT_DIR/server.key \\ -END_TLS - -fi - -UUID=`uuidgen` -cat >> /etc/systemd/system/swarm-manager.service << END_SERVICE_BOTTOM - etcd://$ETCD_SERVER_IP:2379/v2/keys/swarm/ -ExecStop=/usr/bin/docker stop swarm-manager -Restart=always -ExecStartPost=/usr/bin/$WAIT_CURL \\ - --data-binary '{"status": "SUCCESS", "reason": "Setup complete", "data": "OK", "id": "$UUID"}' - -[Install] -WantedBy=multi-user.target -END_SERVICE_BOTTOM - -chown root:root /etc/systemd/system/swarm-manager.service -chmod 644 /etc/systemd/system/swarm-manager.service diff --git a/magnum/drivers/heat/__init__.py b/magnum/drivers/heat/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/drivers/heat/driver.py b/magnum/drivers/heat/driver.py deleted file mode 100755 index daa7c9bc..00000000 --- a/magnum/drivers/heat/driver.py +++ /dev/null @@ -1,258 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
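-
-# Overall flow of this module: HeatDriver maps cluster lifecycle operations
-# (create, update, delete) onto a Heat stack, and HeatPoller reads the stack
-# status back onto the Magnum cluster object, including the cases where the
-# stack has failed or gone missing.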
- -import abc -import os -import six - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils - -from heatclient.common import template_utils -from heatclient import exc as heatexc - -from magnum.common import clients -from magnum.common import context as mag_ctx -from magnum.common import exception -from magnum.common import short_id -from magnum.conductor.handlers.common import cert_manager -from magnum.conductor.handlers.common import trust_manager -from magnum.conductor import utils as conductor_utils -from magnum.drivers.common import driver -from magnum.i18n import _ -from magnum.objects import fields - - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class HeatDriver(driver.Driver): - '''Base Driver class for using Heat - - Abstract class for implementing Drivers that leverage OpenStack Heat for - orchestrating cluster lifecycle operations - ''' - - def _extract_template_definition(self, context, cluster, - scale_manager=None): - cluster_template = conductor_utils.retrieve_cluster_template(context, - cluster) - definition = self.get_template_definition() - return definition.extract_definition(context, cluster_template, - cluster, - scale_manager=scale_manager) - - def _get_env_files(self, template_path, env_rel_paths): - template_dir = os.path.dirname(template_path) - env_abs_paths = [os.path.join(template_dir, f) for f in env_rel_paths] - environment_files = [] - env_map, merged_env = ( - template_utils.process_multiple_environments_and_files( - env_paths=env_abs_paths, env_list_tracker=environment_files)) - return environment_files, env_map - - @abc.abstractmethod - def get_template_definition(self): - '''return an implementation of - - magnum.drivers.common.drivers.heat.TemplateDefinition - ''' - - raise NotImplementedError("Must implement 'get_template_definition'") - - def update_cluster_status(self, context, cluster): - stack_ctx = mag_ctx.make_cluster_context(cluster) - poller = HeatPoller(clients.OpenStackClients(stack_ctx), context, - cluster, self) - poller.poll_and_check() - - def create_cluster(self, context, cluster, cluster_create_timeout): - stack = self._create_stack(context, clients.OpenStackClients(context), - cluster, cluster_create_timeout) - # TODO(randall): keeping this for now to reduce/eliminate data - # migration. Should probably come up with something more generic in - # the future once actual non-heat-based drivers are implemented. 
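-        # stacks.create returns a dict of the form {'stack': {'id': ...}};
-        # persist the stack id so later update/delete/poll calls can find it.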
-        cluster.stack_id = stack['stack']['id']
-
-    def update_cluster(self, context, cluster, scale_manager=None,
-                       rollback=False):
-        self._update_stack(context, clients.OpenStackClients(context), cluster,
-                           scale_manager, rollback)
-
-    def delete_cluster(self, context, cluster):
-        self._delete_stack(context, clients.OpenStackClients(context), cluster)
-
-    def _create_stack(self, context, osc, cluster, cluster_create_timeout):
-        template_path, heat_params, env_files = (
-            self._extract_template_definition(context, cluster))
-
-        tpl_files, template = template_utils.get_template_contents(
-            template_path)
-
-        environment_files, env_map = self._get_env_files(template_path,
-                                                         env_files)
-        tpl_files.update(env_map)
-
-        # Make sure the stack name is unique
-        stack_name = '%s-%s' % (cluster.name, short_id.generate_id())
-        if cluster_create_timeout:
-            heat_timeout = cluster_create_timeout
-        else:
-            # no cluster_create_timeout value was passed in with the request,
-            # so fall back on the configuration file value
-            heat_timeout = cfg.CONF.cluster_heat.create_timeout
-        fields = {
-            'stack_name': stack_name,
-            'parameters': heat_params,
-            'environment_files': environment_files,
-            'template': template,
-            'files': tpl_files,
-            'timeout_mins': heat_timeout
-        }
-        created_stack = osc.heat().stacks.create(**fields)
-
-        return created_stack
-
-    def _update_stack(self, context, osc, cluster, scale_manager=None,
-                      rollback=False):
-        template_path, heat_params, env_files = (
-            self._extract_template_definition(context, cluster,
-                                              scale_manager=scale_manager))
-
-        tpl_files, template = template_utils.get_template_contents(
-            template_path)
-        environment_files, env_map = self._get_env_files(template_path,
-                                                         env_files)
-        tpl_files.update(env_map)
-
-        fields = {
-            'parameters': heat_params,
-            'environment_files': environment_files,
-            'template': template,
-            'files': tpl_files,
-            'disable_rollback': not rollback
-        }
-
-        osc.heat().stacks.update(cluster.stack_id, **fields)
-
-    def _delete_stack(self, context, osc, cluster):
-        osc.heat().stacks.delete(cluster.stack_id)
-
-
-class HeatPoller(object):
-
-    def __init__(self, openstack_client, context, cluster, cluster_driver):
-        self.openstack_client = openstack_client
-        self.context = context
-        self.cluster = cluster
-        self.cluster_template = conductor_utils.retrieve_cluster_template(
-            self.context, cluster)
-        self.template_def = cluster_driver.get_template_definition()
-
-    def poll_and_check(self):
-        # TODO(yuanying): temporary implementation to update api_address,
-        # node_addresses and cluster status
-        try:
-            stack = self.openstack_client.heat().stacks.get(
-                self.cluster.stack_id)
-        except heatexc.NotFound:
-            self._sync_missing_heat_stack()
-            return
-
-        # poll_and_check runs detached and may poll for a long time, so
-        # another user/client can delete the cluster/stack while we wait.
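-        # DELETE_COMPLETE is handled first: _delete_complete cleans up the
-        # trust and the cluster certificates once the stack is gone.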
- if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE: - self._delete_complete() - - if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE, - fields.ClusterStatus.UPDATE_COMPLETE): - self._sync_cluster_and_template_status(stack) - elif stack.stack_status != self.cluster.status: - self._sync_cluster_status(stack) - - if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED, - fields.ClusterStatus.DELETE_FAILED, - fields.ClusterStatus.UPDATE_FAILED, - fields.ClusterStatus.ROLLBACK_COMPLETE, - fields.ClusterStatus.ROLLBACK_FAILED): - self._sync_cluster_and_template_status(stack) - self._cluster_failed(stack) - - def _delete_complete(self): - LOG.info('Cluster has been deleted, stack_id: %s', - self.cluster.stack_id) - try: - trust_manager.delete_trustee_and_trust(self.openstack_client, - self.context, - self.cluster) - cert_manager.delete_certificates_from_cluster(self.cluster, - context=self.context) - except exception.ClusterNotFound: - LOG.info('The cluster %s has been deleted by others.', - self.cluster.uuid) - - def _sync_cluster_status(self, stack): - self.cluster.status = stack.stack_status - self.cluster.status_reason = stack.stack_status_reason - stack_nc_param = self.template_def.get_heat_param( - cluster_attr='node_count') - self.cluster.node_count = stack.parameters[stack_nc_param] - self.cluster.save() - - def get_version_info(self, stack): - stack_param = self.template_def.get_heat_param( - cluster_attr='coe_version') - if stack_param: - self.cluster.coe_version = stack.parameters[stack_param] - - version_module_path = self.template_def.driver_module_path+'.version' - try: - ver = importutils.import_module(version_module_path) - container_version = ver.container_version - except Exception: - container_version = None - self.cluster.container_version = container_version - - def _sync_cluster_and_template_status(self, stack): - self.template_def.update_outputs(stack, self.cluster_template, - self.cluster) - self.get_version_info(stack) - self._sync_cluster_status(stack) - - def _cluster_failed(self, stack): - LOG.error('Cluster error, stack status: %(cluster_status)s, ' - 'stack_id: %(stack_id)s, ' - 'reason: %(reason)s', - {'cluster_status': stack.stack_status, - 'stack_id': self.cluster.stack_id, - 'reason': self.cluster.status_reason}) - - def _sync_missing_heat_stack(self): - if self.cluster.status == fields.ClusterStatus.DELETE_IN_PROGRESS: - self._delete_complete() - elif self.cluster.status == fields.ClusterStatus.CREATE_IN_PROGRESS: - self._sync_missing_stack(fields.ClusterStatus.CREATE_FAILED) - elif self.cluster.status == fields.ClusterStatus.UPDATE_IN_PROGRESS: - self._sync_missing_stack(fields.ClusterStatus.UPDATE_FAILED) - - def _sync_missing_stack(self, new_status): - self.cluster.status = new_status - self.cluster.status_reason = _("Stack with id %s not found in " - "Heat.") % self.cluster.stack_id - self.cluster.save() - LOG.info("Cluster with id %(id)s has been set to " - "%(status)s due to stack with id %(sid)s " - "not found in Heat.", - {'id': self.cluster.id, 'status': self.cluster.status, - 'sid': self.cluster.stack_id}) diff --git a/magnum/drivers/heat/k8s_fedora_template_def.py b/magnum/drivers/heat/k8s_fedora_template_def.py deleted file mode 100644 index f580bc94..00000000 --- a/magnum/drivers/heat/k8s_fedora_template_def.py +++ /dev/null @@ -1,96 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from magnum.drivers.heat import k8s_template_def -from magnum.drivers.heat import template_def -from oslo_config import cfg - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -class ServerAddressOutputMapping(template_def.OutputMapping): - - public_ip_output_key = None - private_ip_output_key = None - - def __init__(self, dummy_arg, cluster_attr=None): - self.cluster_attr = cluster_attr - self.heat_output = self.public_ip_output_key - - def set_output(self, stack, cluster_template, cluster): - if not cluster_template.floating_ip_enabled: - self.heat_output = self.private_ip_output_key - - LOG.debug("Using heat_output: %s", self.heat_output) - super(ServerAddressOutputMapping, - self).set_output(stack, cluster_template, cluster) - - -class MasterAddressOutputMapping(ServerAddressOutputMapping): - public_ip_output_key = 'kube_masters' - private_ip_output_key = 'kube_masters_private' - - -class NodeAddressOutputMapping(ServerAddressOutputMapping): - public_ip_output_key = 'kube_minions' - private_ip_output_key = 'kube_minions_private' - - -class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition): - """Kubernetes template for a Fedora.""" - - def __init__(self): - super(K8sFedoraTemplateDefinition, self).__init__() - self.add_parameter('docker_volume_size', - cluster_attr='docker_volume_size') - self.add_parameter('docker_storage_driver', - cluster_template_attr='docker_storage_driver') - self.add_output('kube_minions', - cluster_attr='node_addresses', - mapping_type=NodeAddressOutputMapping) - self.add_output('kube_masters', - cluster_attr='master_addresses', - mapping_type=MasterAddressOutputMapping) - - def get_params(self, context, cluster_template, cluster, **kwargs): - extra_params = kwargs.pop('extra_params', {}) - - extra_params['username'] = context.user_name - extra_params['tenant_name'] = context.tenant - osc = self.get_osc(context) - extra_params['region_name'] = osc.cinder_region_name() - - # set docker_volume_type - # use the configuration default if None provided - docker_volume_type = cluster_template.labels.get( - 'docker_volume_type', CONF.cinder.default_docker_volume_type) - extra_params['docker_volume_type'] = docker_volume_type - - return super(K8sFedoraTemplateDefinition, - self).get_params(context, cluster_template, cluster, - extra_params=extra_params, - **kwargs) - - def get_env_files(self, cluster_template, cluster): - env_files = [] - - template_def.add_priv_net_env_file(env_files, cluster_template) - template_def.add_etcd_volume_env_file(env_files, cluster_template) - template_def.add_volume_env_file(env_files, cluster) - template_def.add_lb_env_file(env_files, cluster_template) - template_def.add_fip_env_file(env_files, cluster_template) - - return env_files diff --git a/magnum/drivers/heat/k8s_template_def.py b/magnum/drivers/heat/k8s_template_def.py deleted file mode 100644 index bff1644c..00000000 --- a/magnum/drivers/heat/k8s_template_def.py +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in 
compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-from magnum.drivers.heat import template_def
-
-CONF = cfg.CONF
-
-
-"""Kubernetes ports."""
-KUBE_SECURE_PORT = '6443'
-KUBE_INSECURE_PORT = '8080'
-
-
-class K8sApiAddressOutputMapping(template_def.OutputMapping):
-
-    def set_output(self, stack, cluster_template, cluster):
-        if self.cluster_attr is None:
-            return
-
-        output_value = self.get_output_value(stack)
-        if output_value is not None:
-            # TODO(yuanying): port number is hardcoded, this will be fixed
-            protocol = 'https'
-            port = KUBE_SECURE_PORT
-            if cluster_template.tls_disabled:
-                protocol = 'http'
-                port = KUBE_INSECURE_PORT
-
-            params = {
-                'protocol': protocol,
-                'address': output_value,
-                'port': port,
-            }
-            value = "%(protocol)s://%(address)s:%(port)s" % params
-            setattr(cluster, self.cluster_attr, value)
-
-
-class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
-    """Base Kubernetes template."""
-
-    def __init__(self):
-        super(K8sTemplateDefinition, self).__init__()
-        self.add_parameter('master_flavor',
-                           cluster_template_attr='master_flavor_id')
-        self.add_parameter('minion_flavor',
-                           cluster_template_attr='flavor_id')
-        self.add_parameter('number_of_minions',
-                           cluster_attr='node_count')
-        self.add_parameter('external_network',
-                           cluster_template_attr='external_network_id',
-                           required=True)
-        self.add_parameter('fixed_network',
-                           cluster_template_attr='fixed_network')
-        self.add_parameter('fixed_subnet',
-                           cluster_template_attr='fixed_subnet')
-        self.add_parameter('network_driver',
-                           cluster_template_attr='network_driver')
-        self.add_parameter('volume_driver',
-                           cluster_template_attr='volume_driver')
-        self.add_parameter('tls_disabled',
-                           cluster_template_attr='tls_disabled',
-                           required=True)
-        self.add_parameter('registry_enabled',
-                           cluster_template_attr='registry_enabled')
-        self.add_parameter('cluster_uuid',
-                           cluster_attr='uuid',
-                           param_type=str)
-        self.add_parameter('insecure_registry_url',
-                           cluster_template_attr='insecure_registry')
-        self.add_parameter('kube_version',
-                           cluster_attr='coe_version')
-
-        self.add_output('api_address',
-                        cluster_attr='api_address',
-                        mapping_type=K8sApiAddressOutputMapping)
-        self.add_output('kube_minions_private',
-                        cluster_attr=None)
-        self.add_output('kube_masters_private',
-                        cluster_attr=None)
-
-    def get_params(self, context, cluster_template, cluster, **kwargs):
-        extra_params = kwargs.pop('extra_params', {})
-        scale_mgr = kwargs.pop('scale_manager', None)
-        if scale_mgr:
-            hosts = self.get_output('kube_minions_private')
-            extra_params['minions_to_remove'] = (
-                scale_mgr.get_removal_nodes(hosts))
-
-        extra_params['discovery_url'] = self.get_discovery_url(cluster)
-        osc = self.get_osc(context)
-        extra_params['magnum_url'] = osc.magnum_url()
-
-        if cluster_template.tls_disabled:
-            extra_params['loadbalancing_protocol'] = 'HTTP'
-            extra_params['kubernetes_port'] = 8080
-
-        label_list = ['flannel_network_cidr', 'flannel_backend',
-                      'flannel_network_subnetlen',
-                      'system_pods_initial_delay',
-                      'system_pods_timeout',
-                      'admission_control_list',
-                      'prometheus_monitoring',
-                      'grafana_admin_passwd',
'kube_dashboard_enabled', - 'etcd_volume_size'] - - for label in label_list: - extra_params[label] = cluster_template.labels.get(label) - - if cluster_template.registry_enabled: - extra_params['swift_region'] = CONF.docker_registry.swift_region - extra_params['registry_container'] = ( - CONF.docker_registry.swift_registry_container) - - return super(K8sTemplateDefinition, - self).get_params(context, cluster_template, cluster, - extra_params=extra_params, - **kwargs) diff --git a/magnum/drivers/heat/swarm_fedora_template_def.py b/magnum/drivers/heat/swarm_fedora_template_def.py deleted file mode 100644 index a3c8f5bc..00000000 --- a/magnum/drivers/heat/swarm_fedora_template_def.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from magnum.drivers.heat import template_def -from oslo_config import cfg - -CONF = cfg.CONF -DOCKER_PORT = '2376' - - -class SwarmApiAddressOutputMapping(template_def.OutputMapping): - - def set_output(self, stack, cluster_template, cluster): - if self.cluster_attr is None: - return - - output_value = self.get_output_value(stack) - if output_value is not None: - # Note(rocha): protocol should always be tcp as the docker - # command client does not handle https (see bug #1604812). 
- params = { - 'protocol': 'tcp', - 'address': output_value, - 'port': DOCKER_PORT, - } - value = "%(protocol)s://%(address)s:%(port)s" % params - setattr(cluster, self.cluster_attr, value) - - -class SwarmFedoraTemplateDefinition(template_def.BaseTemplateDefinition): - """Docker swarm template for a Fedora Atomic VM.""" - - def __init__(self): - super(SwarmFedoraTemplateDefinition, self).__init__() - self.add_parameter('cluster_uuid', - cluster_attr='uuid', - param_type=str) - self.add_parameter('number_of_nodes', - cluster_attr='node_count') - self.add_parameter('master_flavor', - cluster_template_attr='master_flavor_id') - self.add_parameter('node_flavor', - cluster_template_attr='flavor_id') - self.add_parameter('docker_volume_size', - cluster_attr='docker_volume_size') - self.add_parameter('volume_driver', - cluster_template_attr='volume_driver') - self.add_parameter('external_network', - cluster_template_attr='external_network_id', - required=True) - self.add_parameter('fixed_network', - cluster_template_attr='fixed_network') - self.add_parameter('fixed_subnet', - cluster_template_attr='fixed_subnet') - self.add_parameter('network_driver', - cluster_template_attr='network_driver') - self.add_parameter('tls_disabled', - cluster_template_attr='tls_disabled', - required=True) - self.add_parameter('registry_enabled', - cluster_template_attr='registry_enabled') - self.add_parameter('docker_storage_driver', - cluster_template_attr='docker_storage_driver') - self.add_parameter('swarm_version', - cluster_attr='coe_version') - - self.add_output('api_address', - cluster_attr='api_address', - mapping_type=SwarmApiAddressOutputMapping) - self.add_output('swarm_master_private', - cluster_attr=None) - self.add_output('swarm_masters', - cluster_attr='master_addresses') - self.add_output('swarm_nodes_private', - cluster_attr=None) - self.add_output('swarm_nodes', - cluster_attr='node_addresses') - self.add_output('discovery_url', - cluster_attr='discovery_url') - - def get_params(self, context, cluster_template, cluster, **kwargs): - extra_params = kwargs.pop('extra_params', {}) - extra_params['discovery_url'] = self.get_discovery_url(cluster) - # HACK(apmelton) - This uses the user's bearer token, ideally - # it should be replaced with an actual trust token with only - # access to do what the template needs it to do. 
- osc = self.get_osc(context) - extra_params['magnum_url'] = osc.magnum_url() - - label_list = ['flannel_network_cidr', 'flannel_backend', - 'flannel_network_subnetlen', 'rexray_preempt', - 'swarm_strategy'] - - extra_params['auth_url'] = context.auth_url - - # set docker_volume_type - # use the configuration default if None provided - docker_volume_type = cluster_template.labels.get( - 'docker_volume_type', CONF.cinder.default_docker_volume_type) - extra_params['docker_volume_type'] = docker_volume_type - - for label in label_list: - extra_params[label] = cluster_template.labels.get(label) - - if cluster_template.registry_enabled: - extra_params['swift_region'] = CONF.docker_registry.swift_region - extra_params['registry_container'] = ( - CONF.docker_registry.swift_registry_container) - - return super(SwarmFedoraTemplateDefinition, - self).get_params(context, cluster_template, cluster, - extra_params=extra_params, - **kwargs) - - def get_env_files(self, cluster_template, cluster): - env_files = [] - - template_def.add_priv_net_env_file(env_files, cluster_template) - template_def.add_volume_env_file(env_files, cluster) - template_def.add_lb_env_file(env_files, cluster_template) - - return env_files diff --git a/magnum/drivers/heat/swarm_mode_template_def.py b/magnum/drivers/heat/swarm_mode_template_def.py deleted file mode 100644 index 68ab5ec1..00000000 --- a/magnum/drivers/heat/swarm_mode_template_def.py +++ /dev/null @@ -1,130 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from magnum.drivers.heat import template_def -from oslo_config import cfg - -CONF = cfg.CONF -DOCKER_PORT = '2375' - - -class SwarmModeApiAddressOutputMapping(template_def.OutputMapping): - - def set_output(self, stack, cluster_template, cluster): - if self.cluster_attr is None: - return - - output_value = self.get_output_value(stack) - if output_value is not None: - # Note(rocha): protocol should always be tcp as the docker - # command client does not handle https (see bug #1604812). 
- params = { - 'protocol': 'tcp', - 'address': output_value, - 'port': DOCKER_PORT, - } - value = "%(protocol)s://%(address)s:%(port)s" % params - setattr(cluster, self.cluster_attr, value) - - -class SwarmModeMasterAddressesOutputMapping(template_def.OutputMapping): - - def set_output(self, stack, cluster_template, cluster): - if self.cluster_attr is None: - return - - _master_addresses = [] - for output in stack.to_dict().get('outputs', []): - if output['output_key'] == 'swarm_primary_master': - _master_addresses.append(output['output_value'][0]) - elif output['output_key'] == 'swarm_secondary_masters': - _master_addresses += output['output_value'] - setattr(cluster, self.cluster_attr, _master_addresses) - - -class SwarmModeTemplateDefinition(template_def.BaseTemplateDefinition): - """Docker swarm mode template.""" - - def __init__(self): - super(SwarmModeTemplateDefinition, self).__init__() - self.add_parameter('cluster_uuid', - cluster_attr='uuid', - param_type=str) - self.add_parameter('number_of_nodes', - cluster_attr='node_count') - self.add_parameter('master_flavor', - cluster_template_attr='master_flavor_id') - self.add_parameter('node_flavor', - cluster_template_attr='flavor_id') - self.add_parameter('docker_volume_size', - cluster_attr='docker_volume_size') - self.add_parameter('volume_driver', - cluster_template_attr='volume_driver') - self.add_parameter('external_network', - cluster_template_attr='external_network_id', - required=True) - self.add_parameter('fixed_network', - cluster_template_attr='fixed_network') - self.add_parameter('fixed_subnet', - cluster_template_attr='fixed_subnet') - self.add_parameter('tls_disabled', - cluster_template_attr='tls_disabled', - required=True) - self.add_parameter('docker_storage_driver', - cluster_template_attr='docker_storage_driver') - - self.add_output('api_address', - cluster_attr='api_address', - mapping_type=SwarmModeApiAddressOutputMapping) - self.add_output('swarm_primary_master_private', - cluster_attr=None) - self.add_output('swarm_primary_master', - cluster_attr='master_addresses', - mapping_type=SwarmModeMasterAddressesOutputMapping) - self.add_output('swarm_nodes_private', - cluster_attr=None) - self.add_output('swarm_nodes', - cluster_attr='node_addresses') - - def get_params(self, context, cluster_template, cluster, **kwargs): - extra_params = kwargs.pop('extra_params', {}) - # HACK(apmelton) - This uses the user's bearer token, ideally - # it should be replaced with an actual trust token with only - # access to do what the template needs it to do. 
- osc = self.get_osc(context) - extra_params['magnum_url'] = osc.magnum_url() - - label_list = ['rexray_preempt'] - - extra_params['auth_url'] = context.auth_url - - for label in label_list: - extra_params[label] = cluster_template.labels.get(label) - - # set docker_volume_type - # use the configuration default if None provided - docker_volume_type = cluster_template.labels.get( - 'docker_volume_type', CONF.cinder.default_docker_volume_type) - extra_params['docker_volume_type'] = docker_volume_type - - return super(SwarmModeTemplateDefinition, - self).get_params(context, cluster_template, cluster, - extra_params=extra_params, - **kwargs) - - def get_env_files(self, cluster_template, cluster): - env_files = [] - - template_def.add_priv_net_env_file(env_files, cluster_template) - template_def.add_volume_env_file(env_files, cluster) - template_def.add_lb_env_file(env_files, cluster_template) - - return env_files diff --git a/magnum/drivers/heat/template_def.py b/magnum/drivers/heat/template_def.py deleted file mode 100755 index fbcd90e8..00000000 --- a/magnum/drivers/heat/template_def.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import abc -import ast - -from oslo_log import log as logging -import requests -import six - -from magnum.common import clients -from magnum.common import exception -import magnum.conf - -from requests import exceptions as req_exceptions - -LOG = logging.getLogger(__name__) - -COMMON_TEMPLATES_PATH = "../../common/templates/" -COMMON_ENV_PATH = COMMON_TEMPLATES_PATH + "environments/" - -CONF = magnum.conf.CONF - - -class ParameterMapping(object): - """A mapping associating heat param and cluster_template attr. - - A ParameterMapping is an association of a Heat parameter name with - an attribute on a Cluster, ClusterTemplate, or both. - - In the case of both cluster_template_attr and cluster_attr being set, the - ClusterTemplate will be checked first and then Cluster if the attribute - isn't set on the ClusterTemplate. - - Parameters can also be set as 'required'. If a required parameter - isn't set, a RequiredArgumentNotProvided exception will be raised. 
- """ - def __init__(self, heat_param, cluster_template_attr=None, - cluster_attr=None, required=False, - param_type=lambda x: x): - self.heat_param = heat_param - self.cluster_template_attr = cluster_template_attr - self.cluster_attr = cluster_attr - self.required = required - self.param_type = param_type - - def set_param(self, params, cluster_template, cluster): - value = None - - if (self.cluster_template_attr and - getattr(cluster_template, self.cluster_template_attr, None) - is not None): - value = getattr(cluster_template, self.cluster_template_attr) - elif (self.cluster_attr and - getattr(cluster, self.cluster_attr, None) is not None): - value = getattr(cluster, self.cluster_attr) - elif self.required: - kwargs = dict(heat_param=self.heat_param) - raise exception.RequiredParameterNotProvided(**kwargs) - - if value is not None: - value = self.param_type(value) - params[self.heat_param] = value - - -class OutputMapping(object): - """A mapping associating heat outputs and cluster attr. - - An OutputMapping is an association of a Heat output with a key - Magnum understands. - """ - - def __init__(self, heat_output, cluster_attr=None): - self.cluster_attr = cluster_attr - self.heat_output = heat_output - - def set_output(self, stack, cluster_template, cluster): - if self.cluster_attr is None: - return - - output_value = self.get_output_value(stack) - if output_value is not None: - setattr(cluster, self.cluster_attr, output_value) - - def matched(self, output_key): - return self.heat_output == output_key - - def get_output_value(self, stack): - for output in stack.to_dict().get('outputs', []): - if output['output_key'] == self.heat_output: - return output['output_value'] - - LOG.warning('stack does not have output_key %s', self.heat_output) - return None - - -@six.add_metaclass(abc.ABCMeta) -class TemplateDefinition(object): - '''A mapping between Magnum objects and Heat templates. - - A TemplateDefinition is essentially a mapping between Magnum objects - and Heat templates. Each TemplateDefinition has a mapping of Heat - parameters. - ''' - - def __init__(self): - self.param_mappings = list() - self.output_mappings = list() - - def add_parameter(self, *args, **kwargs): - param = ParameterMapping(*args, **kwargs) - self.param_mappings.append(param) - - def add_output(self, *args, **kwargs): - mapping_type = kwargs.pop('mapping_type', OutputMapping) - output = mapping_type(*args, **kwargs) - self.output_mappings.append(output) - - def get_output(self, *args, **kwargs): - for output in self.output_mappings: - if output.matched(*args, **kwargs): - return output - - return None - - def get_params(self, context, cluster_template, cluster, **kwargs): - """Pulls template parameters from ClusterTemplate. - - :param context: Context to pull template parameters for - :param cluster_template: ClusterTemplate to pull template parameters - from - :param cluster: Cluster to pull template parameters from - :param extra_params: Any extra params to be provided to the template - - :return: dict of template parameters - """ - template_params = dict() - - for mapping in self.param_mappings: - mapping.set_param(template_params, cluster_template, cluster) - - if 'extra_params' in kwargs: - template_params.update(kwargs.get('extra_params')) - - return template_params - - def get_env_files(self, cluster_template, cluster): - """Gets stack environment files based upon ClusterTemplate attributes. - - Base implementation returns no files (empty list). Meant to be - overridden by subclasses. 
- - :param cluster_template: ClusterTemplate to grab environment files for - - :return: list of relative paths to environment files - """ - return [] - - def get_heat_param(self, cluster_attr=None, cluster_template_attr=None): - """Returns stack param name. - - Return stack param name using cluster and cluster_template attributes - :param cluster_attr cluster attribute from which it maps to stack - attribute - :param cluster_template_attr cluster_template attribute from which it - maps to stack attribute - - :return stack parameter name or None - """ - for mapping in self.param_mappings: - if (mapping.cluster_attr == cluster_attr and - mapping.cluster_template_attr == cluster_template_attr): - return mapping.heat_param - - return None - - def update_outputs(self, stack, cluster_template, cluster): - for output in self.output_mappings: - output.set_output(stack, cluster_template, cluster) - - @abc.abstractproperty - def driver_module_path(self): - pass - - @abc.abstractproperty - def template_path(self): - pass - - def extract_definition(self, context, cluster_template, cluster, **kwargs): - return (self.template_path, - self.get_params(context, cluster_template, cluster, **kwargs), - self.get_env_files(cluster_template, cluster)) - - -class BaseTemplateDefinition(TemplateDefinition): - def __init__(self): - super(BaseTemplateDefinition, self).__init__() - self._osc = None - - self.add_parameter('ssh_key_name', - cluster_attr='keypair', - required=True) - self.add_parameter('server_image', - cluster_template_attr='image_id') - self.add_parameter('dns_nameserver', - cluster_template_attr='dns_nameserver') - self.add_parameter('http_proxy', - cluster_template_attr='http_proxy') - self.add_parameter('https_proxy', - cluster_template_attr='https_proxy') - self.add_parameter('no_proxy', - cluster_template_attr='no_proxy') - self.add_parameter('number_of_masters', - cluster_attr='master_count') - - @property - def driver_module_path(self): - pass - - @abc.abstractproperty - def template_path(self): - pass - - def get_osc(self, context): - if not self._osc: - self._osc = clients.OpenStackClients(context) - return self._osc - - def get_params(self, context, cluster_template, cluster, **kwargs): - osc = self.get_osc(context) - - extra_params = kwargs.pop('extra_params', {}) - extra_params['trustee_domain_id'] = osc.keystone().trustee_domain_id - extra_params['trustee_user_id'] = cluster.trustee_user_id - extra_params['trustee_username'] = cluster.trustee_username - extra_params['trustee_password'] = cluster.trustee_password - - # Only pass trust ID into the template if allowed by the config file - if CONF.trust.cluster_user_trust: - extra_params['trust_id'] = cluster.trust_id - else: - extra_params['trust_id'] = "" - - extra_params['auth_url'] = osc.url_for( - service_type='identity', - interface=CONF.trust.trustee_keystone_interface, - version=3) - - return super(BaseTemplateDefinition, - self).get_params(context, cluster_template, cluster, - extra_params=extra_params, - **kwargs) - - def validate_discovery_url(self, discovery_url, expect_size): - url = str(discovery_url) - if url[len(url)-1] == '/': - url += '_config/size' - else: - url += '/_config/size' - - try: - result = requests.get(url).text - except req_exceptions.RequestException as err: - LOG.error(six.text_type(err)) - raise exception.GetClusterSizeFailed( - discovery_url=discovery_url) - - try: - result = ast.literal_eval(result) - except (ValueError, SyntaxError): - raise exception.InvalidClusterDiscoveryURL( - 
discovery_url=discovery_url) - - node_value = result.get('node', None) - if node_value is None: - raise exception.InvalidClusterDiscoveryURL( - discovery_url=discovery_url) - - value = node_value.get('value', None) - if value is None: - raise exception.InvalidClusterDiscoveryURL( - discovery_url=discovery_url) - elif int(value) != expect_size: - raise exception.InvalidClusterSize( - expect_size=expect_size, - size=int(value), - discovery_url=discovery_url) - - def get_discovery_url(self, cluster): - if hasattr(cluster, 'discovery_url') and cluster.discovery_url: - if getattr(cluster, 'master_count', None) is not None: - self.validate_discovery_url(cluster.discovery_url, - cluster.master_count) - else: - self.validate_discovery_url(cluster.discovery_url, 1) - discovery_url = cluster.discovery_url - else: - discovery_endpoint = ( - CONF.cluster.etcd_discovery_service_endpoint_format % - {'size': cluster.master_count}) - try: - discovery_url = requests.get(discovery_endpoint).text - except req_exceptions.RequestException as err: - LOG.error(six.text_type(err)) - raise exception.GetDiscoveryUrlFailed( - discovery_endpoint=discovery_endpoint) - if not discovery_url: - raise exception.InvalidDiscoveryURL( - discovery_url=discovery_url, - discovery_endpoint=discovery_endpoint) - else: - cluster.discovery_url = discovery_url - return discovery_url - - -def add_lb_env_file(env_files, cluster_template): - if cluster_template.master_lb_enabled: - env_files.append(COMMON_ENV_PATH + 'with_master_lb.yaml') - else: - env_files.append(COMMON_ENV_PATH + 'no_master_lb.yaml') - - -def add_volume_env_file(env_files, cluster): - if cluster.docker_volume_size is None: - env_files.append(COMMON_ENV_PATH + 'no_volume.yaml') - else: - env_files.append(COMMON_ENV_PATH + 'with_volume.yaml') - - -def add_etcd_volume_env_file(env_files, cluster_template): - if int(cluster_template.labels.get('etcd_volume_size', 0)) < 1: - env_files.append(COMMON_ENV_PATH + 'no_etcd_volume.yaml') - else: - env_files.append(COMMON_ENV_PATH + 'with_etcd_volume.yaml') - - -def add_fip_env_file(env_files, cluster_template): - if cluster_template.floating_ip_enabled: - env_files.append(COMMON_ENV_PATH + 'enable_floating_ip.yaml') - else: - env_files.append(COMMON_ENV_PATH + 'disable_floating_ip.yaml') - - -def add_priv_net_env_file(env_files, cluster_template): - if cluster_template.fixed_network: - env_files.append(COMMON_ENV_PATH + 'no_private_network.yaml') - else: - env_files.append(COMMON_ENV_PATH + 'with_private_network.yaml') diff --git a/magnum/drivers/k8s_coreos_v1/__init__.py b/magnum/drivers/k8s_coreos_v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/drivers/k8s_coreos_v1/driver.py b/magnum/drivers/k8s_coreos_v1/driver.py deleted file mode 100644 index 8b8cc2c6..00000000 --- a/magnum/drivers/k8s_coreos_v1/driver.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
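For reference, the discovery handshake implemented by get_discovery_url() and
validate_discovery_url() above reduces to two HTTP calls against the etcd
discovery service. The sketch below restates that flow in standalone Python;
the endpoint format mirrors the usual default of
CONF.cluster.etcd_discovery_service_endpoint_format, and Magnum's exception
classes are simplified to ValueError for illustration:

    import ast

    import requests

    # Assumed default of etcd_discovery_service_endpoint_format.
    DISCOVERY_FMT = 'https://discovery.etcd.io/new?size=%(size)d'

    def new_discovery_url(master_count):
        # Ask the discovery service for a fresh token sized to master_count.
        url = requests.get(DISCOVERY_FMT % {'size': master_count}).text
        if not url:
            raise ValueError('discovery service returned an empty URL')
        return url

    def validate_discovery_url(discovery_url, expect_size):
        # The service reports the requested size under <token>/_config/size,
        # as a document like {"node": {"value": "3"}}.
        raw = requests.get(discovery_url.rstrip('/') + '/_config/size').text
        value = int(ast.literal_eval(raw)['node']['value'])
        if value != expect_size:
            raise ValueError('expected %d masters, got %d'
                             % (expect_size, value))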
- -from magnum.drivers.common import k8s_monitor -from magnum.drivers.common.k8s_scale_manager import K8sScaleManager -from magnum.drivers.heat import driver -from magnum.drivers.k8s_coreos_v1 import template_def - - -class Driver(driver.HeatDriver): - - @property - def provides(self): - return [ - {'server_type': 'vm', - 'os': 'coreos', - 'coe': 'kubernetes'}, - ] - - def get_template_definition(self): - return template_def.CoreOSK8sTemplateDefinition() - - def get_monitor(self, context, cluster): - return k8s_monitor.K8sMonitor(context, cluster) - - def get_scale_manager(self, context, osclient, cluster): - return K8sScaleManager(context, osclient, cluster) diff --git a/magnum/drivers/k8s_coreos_v1/template_def.py b/magnum/drivers/k8s_coreos_v1/template_def.py deleted file mode 100644 index 448dcc7a..00000000 --- a/magnum/drivers/k8s_coreos_v1/template_def.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os - -import magnum.conf -from magnum.drivers.heat import k8s_template_def -from magnum.drivers.heat import template_def - -CONF = magnum.conf.CONF - - -class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition): - """Kubernetes template for CoreOS VM.""" - - def __init__(self): - super(CoreOSK8sTemplateDefinition, self).__init__() - self.add_output('kube_minions', - cluster_attr='node_addresses') - self.add_output('kube_masters', - cluster_attr='master_addresses') - - def get_env_files(self, cluster_template, cluster): - env_files = [] - - template_def.add_priv_net_env_file(env_files, cluster_template) - template_def.add_lb_env_file(env_files, cluster_template) - template_def.add_fip_env_file(env_files, cluster_template) - - return env_files - - @property - def driver_module_path(self): - return __name__[:__name__.rindex('.')] - - @property - def template_path(self): - return os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'templates/kubecluster.yaml') diff --git a/magnum/drivers/k8s_coreos_v1/templates/COPYING b/magnum/drivers/k8s_coreos_v1/templates/COPYING deleted file mode 100644 index d6456956..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
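Taken together, the driver and template-definition modules removed above form
a small pipeline: the conductor resolves a per-COE driver, asks it for its
TemplateDefinition, and extract_definition() hands back everything that
_create_stack() feeds into Heat. A sketch of that call chain, with context,
cluster_template and cluster standing in for the real Magnum objects a caller
would hold:

    from magnum.drivers.k8s_coreos_v1 import driver as coreos_driver

    def extract_for_heat(context, cluster_template, cluster):
        # The conductor normally picks a driver by its (server_type, os, coe)
        # 'provides' tuple; the CoreOS Kubernetes driver is used directly
        # here for illustration.
        drv = coreos_driver.Driver()
        tdef = drv.get_template_definition()  # CoreOSK8sTemplateDefinition
        # Returns (template_path, heat_params, env_files), exactly the
        # triple that _create_stack() passes to osc.heat().stacks.create().
        return tdef.extract_definition(context, cluster_template, cluster)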
diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/add-proxy.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/add-proxy.yaml
deleted file mode 100644
index fc0abf45..00000000
--- a/magnum/drivers/k8s_coreos_v1/templates/fragments/add-proxy.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-#cloud-config
-write_files:
-  - path: /etc/systemd/system/add-proxy.service
-    owner: "root:root"
-    permissions: "0644"
-    content: |
-      [Unit]
-      Description=Configure proxy
-
-      [Service]
-      Type=oneshot
-      EnvironmentFile=/etc/sysconfig/heat-params
-      ExecStart=/etc/sysconfig/add-proxy.sh
-
-      [Install]
-      WantedBy=multi-user.target
-
-  - path: /etc/sysconfig/add-proxy.sh
-    owner: "root:root"
-    permissions: "0755"
-    content: |
-      #!/bin/sh
-
-      DOCKER_HTTP_PROXY_CONF=/etc/systemd/system/docker.service.d/http_proxy.conf
-
-      DOCKER_HTTPS_PROXY_CONF=/etc/systemd/system/docker.service.d/https_proxy.conf
-
-      DOCKER_NO_PROXY_CONF=/etc/systemd/system/docker.service.d/no_proxy.conf
-
-      DOCKER_RESTART=0
-
-      ENVIRONMENT=/etc/environment
-
-      mkdir -p /etc/systemd/system/docker.service.d
-
-      if [ -n "$HTTP_PROXY" ]; then
-          cat <<EOF > $DOCKER_HTTP_PROXY_CONF
-      [Service]
-      Environment=HTTP_PROXY=$HTTP_PROXY
-      EOF
-
-          DOCKER_RESTART=1
-
-          echo "http_proxy=$HTTP_PROXY" >> $ENVIRONMENT
-      fi
-
-      if [ -n "$HTTPS_PROXY" ]; then
-          cat <<EOF > $DOCKER_HTTPS_PROXY_CONF
-      [Service]
-      Environment=HTTPS_PROXY=$HTTPS_PROXY
-      EOF
-
-          DOCKER_RESTART=1
-
-          echo "https_proxy=$HTTPS_PROXY" >> $ENVIRONMENT
-      fi
-
-      if [ -n "$NO_PROXY" ]; then
-          cat <<EOF > $DOCKER_NO_PROXY_CONF
-      [Service]
-      Environment=NO_PROXY=$NO_PROXY
-      EOF
-
-          DOCKER_RESTART=1
-
-          echo "no_proxy=$NO_PROXY" >> $ENVIRONMENT
-      fi
-
-      if [ "$DOCKER_RESTART" -eq 1 ]; then
-          systemctl daemon-reload
-          systemctl --no-block restart docker.service
-      fi
diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/configure-docker.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/configure-docker.yaml
deleted file mode 100644
index 09600fa5..00000000
--- a/magnum/drivers/k8s_coreos_v1/templates/fragments/configure-docker.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-#cloud-config
-write_files:
-  - path: /etc/systemd/system/configure-docker.service
-    owner: "root:root"
-    permissions: "0644"
-    content: |
-      [Unit]
-      Description=Configure Docker
-
-      [Service]
-      Type=oneshot
-      EnvironmentFile=/etc/sysconfig/heat-params
-      ExecStart=/etc/sysconfig/configure-docker.sh
-
-      [Install]
-      WantedBy=multi-user.target
-
-  - path: /etc/sysconfig/configure-docker.sh
-    owner: "root:root"
-    permissions: "0755"
-    content: |
-      #!/bin/sh
-
-      if [ -n "${INSECURE_REGISTRY_URL}" ]; then
-          DOCKER_OPTS="--insecure-registry ${INSECURE_REGISTRY_URL}"
-      fi
-
-      TEMPLATE=/etc/systemd/system/docker.service.d/docker-opts.conf
-      mkdir -p $(dirname ${TEMPLATE})
-      cat << EOF > $TEMPLATE
-      [Service]
-      Environment=DOCKER_OPTS=$DOCKER_OPTS
-      EOF
-
-      systemctl daemon-reload
-      systemctl --no-block restart docker.service
diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/configure-etcd.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/configure-etcd.yaml
deleted file mode 100644
index 8776941e..00000000
--- a/magnum/drivers/k8s_coreos_v1/templates/fragments/configure-etcd.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
-#cloud-config
-write_files:
-  - path: /etc/systemd/system/configure-etcd.service
-    owner: "root:root"
-    permissions: "0644"
-    content: |
-      [Unit]
-      Description=Configure etcd
-
-      [Service]
-      Type=oneshot
-      EnvironmentFile=/etc/sysconfig/heat-params
-      ExecStart=/etc/sysconfig/configure-etcd.sh
-
-      [Install]
WantedBy=multi-user.target - - - path: /etc/sysconfig/configure-etcd.sh - owner: "root:root" - permissions: "0755" - content: | - #!/bin/sh - - if [ -z "${KUBE_NODE_IP}" ]; then - KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) - fi - - DROP_IN_FILE=/etc/systemd/system/etcd-member.service.d/20-configure-etcd.conf - mkdir -p $(dirname $DROP_IN_FILE) - protocol="https" - - if [ "$TLS_DISABLED" = "True" ]; then - protocol="http" - fi - cat > $DROP_IN_FILE <> $DROP_IN_FILE <> $DROP_IN_FILE - fi - - systemctl enable etcd-member - systemctl --no-block start etcd-member diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/create-kube-namespace.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/create-kube-namespace.yaml deleted file mode 100644 index ff886126..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/create-kube-namespace.yaml +++ /dev/null @@ -1,44 +0,0 @@ -#cloud-config -write_files: - - path: /etc/systemd/system/create-kube-namespace.service - owner: "root:root" - permissions: "0644" - content: | - [Unit] - After=kubelet.service - Requires=kubelet.service - Description=Create kube-system namespace - - [Service] - Type=oneshot - EnvironmentFile=/etc/sysconfig/heat-params - ExecStart=/etc/sysconfig/create-kube-namespace.sh - - [Install] - WantedBy=multi-user.target - - - path: /etc/sysconfig/create-kube-namespace.sh - owner: "root:root" - permissions: "0755" - content: | - #!/bin/sh - - until curl -sf "http://127.0.0.1:8080/healthz" - do - echo "Waiting for Kubernetes API..." - sleep 5 - done - - KUBE_SYSTEM_JSON=/srv/kubernetes/kube-system-namespace.json - mkdir -p $(dirname ${KUBE_SYSTEM_JSON}) - cat > ${KUBE_SYSTEM_JSON} < $TEMPLATE < $TEMPLATE < $TEMPLATE < $TEMPLATE < /dev/null - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /etc/kubernetes/addons/coredns-cm.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/configmaps" > /dev/null - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /etc/kubernetes/addons/coredns-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /etc/kubernetes/addons/coredns-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-apiserver.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-apiserver.yaml deleted file mode 100644 index 640b6e27..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-apiserver.yaml +++ /dev/null @@ -1,92 +0,0 @@ -#cloud-config -write_files: - - path: /etc/systemd/system/enable-kube-apiserver.service - owner: "root:root" - permissions: "0644" - content: | - [Unit] - Description=Configure Kubernetes API Server - - [Service] - Type=oneshot - EnvironmentFile=/etc/sysconfig/heat-params - ExecStart=/etc/sysconfig/enable-kube-apiserver.sh - - [Install] - WantedBy=multi-user.target - - - path: /etc/sysconfig/enable-kube-apiserver.sh - owner: "root:root" - permissions: "0755" - content: | - #!/bin/sh - - KUBE_ADMISSION_CONTROL="" - if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then - KUBE_ADMISSION_CONTROL="- --admission-control=${ADMISSION_CONTROL_LIST}" - fi - - TLS_CERT_FILE=${KUBE_CERTS_PATH}/apiserver.pem - TLS_PRIVATE_KEY_FILE=${KUBE_CERTS_PATH}/apiserver-key.pem - CLIENT_CA_FILE=${KUBE_CERTS_PATH}/ca.pem - INSECURE_PORT=8080 - 
SECURE_PORT=${KUBE_API_PORT} - BIND_ADDRESS_CMD="--bind-address=0.0.0.0" - if [ "${TLS_DISABLED}" == "True" ]; then - TLS_CERT_FILE= - TLS_PRIVATE_KEY_FILE= - CLIENT_CA_FILE= - INSECURE_PORT=${KUBE_API_PORT} - SECURE_PORT=0 - BIND_ADDRESS_CMD="--insecure-bind-address=0.0.0.0" - fi - - TEMPLATE=/etc/kubernetes/manifests/kube-apiserver.yaml - mkdir -p $(dirname ${TEMPLATE}) - cat > $TEMPLATE < ${TEMPLATE} < $TEMPLATE < $TEMPLATE < /dev/null - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /etc/kubernetes/addons/kubedash-rc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers" > /dev/null - fi diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-proxy-master.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-proxy-master.yaml deleted file mode 100644 index 1b11a665..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kube-proxy-master.yaml +++ /dev/null @@ -1,60 +0,0 @@ -#cloud-config -write_files: - - path: /etc/systemd/system/enable-kube-proxy.service - owner: "root:root" - permissions: "0644" - content: | - [Unit] - Description=Configure Kubernetes Proxy - - [Service] - Type=oneshot - EnvironmentFile=/etc/sysconfig/heat-params - ExecStart=/etc/sysconfig/enable-kube-proxy-master.sh - - [Install] - WantedBy=multi-user.target - - - path: /etc/sysconfig/enable-kube-proxy-master.sh - owner: "root:root" - permissions: "0755" - content: | - #!/bin/sh - - TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml - mkdir -p $(dirname ${TEMPLATE}) - cat > ${TEMPLATE} < ${TEMPLATE} < ${TEMPLATE} < $CONF_FILE < $TEMPLATE - #!/bin/sh - # This is bind mounted into the kubelet rootfs and all rkt shell-outs go - # through this rkt wrapper. It essentially enters the host mount namespace - # (which it is already in) only for the purpose of breaking out of the chroot - # before calling rkt. It makes things like rkt gc work and avoids bind mounting - # in certain rkt filesystem dependancies into the kubelet rootfs. This can - # eventually be obviated when the write-api stuff gets upstream and rkt gc is - # through the api-server. 
Related issue: - # https://github.com/coreos/rkt/issues/2878 - exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@" - EOF - - systemctl enable kubelet - systemctl --no-block start kubelet diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-minion.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-minion.yaml deleted file mode 100644 index aa47eb23..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-minion.yaml +++ /dev/null @@ -1,109 +0,0 @@ -#cloud-config -write_files: - - path: /etc/systemd/system/enable-kubelet.service - owner: "root:root" - permissions: "0644" - content: | - [Unit] - Description=Enable Kubelet - - [Service] - Type=oneshot - EnvironmentFile=/etc/sysconfig/heat-params - ExecStart=/etc/sysconfig/enable-kubelet-minion.sh - - [Install] - WantedBy=multi-user.target - - - path: /etc/sysconfig/enable-kubelet-minion.sh - owner: "root:root" - permissions: "0755" - content: | - #!/bin/sh - - if [ -z "${KUBE_NODE_IP}" ]; then - KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) - fi - - if [ -n "${INSECURE_REGISTRY_URL}" ]; then - INSECURE_REGISTRY_ARGS="--pod-infra-container-image=${INSECURE_REGISTRY_URL}/google_containers/pause\:3.0" - else - INSECURE_REGISTRY_ARGS="" - fi - - TLS_CERT_FILE=${KUBE_CERTS_PATH}/worker.pem - TLS_PRIVATE_KEY_FILE=${KUBE_CERTS_PATH}/worker-key.pem - KUBE_PROTOCOL="https" - KUBE_CONFIG="/etc/kubernetes/config/worker-kubeconfig.yaml" - if [ "$TLS_DISABLED" == "True" ]; then - TLS_CERT_FILE= - TLS_PRIVATE_KEY_FILE= - KUBE_PROTOCOL="http" - KUBE_CONFIG= - fi - KUBE_MASTER_URI="$KUBE_PROTOCOL://$KUBE_MASTER_IP:$KUBE_API_PORT" - - HOSTNAME_OVERRIDE=$(hostname --short | sed 's/\.novalocal//') - - uuid_file="/var/run/kubelet-pod.uuid" - CONF_FILE=/etc/systemd/system/kubelet.service - cat > $CONF_FILE < $TEMPLATE - #!/bin/sh - # This is bind mounted into the kubelet rootfs and all rkt shell-outs go - # through this rkt wrapper. It essentially enters the host mount namespace - # (which it is already in) only for the purpose of breaking out of the chroot - # before calling rkt. It makes things like rkt gc work and avoids bind mounting - # in certain rkt filesystem dependancies into the kubelet rootfs. This can - # eventually be obviated when the write-api stuff gets upstream and rkt gc is - # through the api-server. 
Related issue: - # https://github.com/coreos/rkt/issues/2878 - exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@" - EOF - - systemctl enable kubelet - systemctl --no-block start kubelet diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-network-service-client.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-network-service-client.yaml deleted file mode 100644 index e2b11c11..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-network-service-client.yaml +++ /dev/null @@ -1,93 +0,0 @@ -#cloud-config -write_files: - - path: /etc/systemd/system/enable-network-service.service - owner: "root:root" - permissions: "0644" - content: | - [Unit] - Description=Enable Network Service - - [Service] - Type=oneshot - EnvironmentFile=/etc/sysconfig/heat-params - ExecStart=/etc/sysconfig/enable-network-service.sh - - [Install] - WantedBy=multi-user.target - - - path: /etc/sysconfig/enable-network-service.sh - owner: "root:root" - permissions: "0755" - content: | - #!/bin/sh - - if [ "$NETWORK_DRIVER" != "flannel" ]; then - exit 0 - fi - - if [ -z "${KUBE_NODE_IP}" ]; then - KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) - fi - - ETCD_SERVER_IP=${ETCD_SERVER_IP:-127.0.0.1} - - PROTOCOL=https - - if [ "$TLS_DISABLED" = "True" ]; then - PROTOCOL=http - fi - - ENV_FILE=/etc/flannel/options.env - mkdir -p $(dirname $ENV_FILE) - cat > $ENV_FILE <> $ENV_FILE < $DROP_IN_FILE < $DOCKER_FLANNEL_CONF < $CNI - { - "name": "podnet", - "type": "flannel", - "delegate": { - "isDefaultGateway": true - } - } - EOF - - DOCKER_FLANNEL_CONF=/etc/kubernetes/cni/docker_opts_cni.env - mkdir -p $(dirname $DOCKER_FLANNEL_CONF) - cat > $DOCKER_FLANNEL_CONF < $ENV_FILE < $DROP_IN_FILE < $DOCKER_FLANNEL_CONF < $CNI - { - "name": "podnet", - "type": "flannel", - "delegate": { - "isDefaultGateway": true - } - } - EOF - - DOCKER_FLANNEL_CONF=/etc/kubernetes/cni/docker_opts_cni.env - mkdir -p $(dirname $DOCKER_FLANNEL_CONF) - cat > $DOCKER_FLANNEL_CONF < ABCD - key=$(echo "$json_response" | sed 's/^.*"pem": "\([^"]*\)".*$/\1/') - # decode newline characters - key=$(echo "$key" | sed 's/\\n/\n/g') - echo "$key" - } - - set -o errexit - set -o nounset - set -o pipefail - - if [ "$TLS_DISABLED" == "True" ]; then - exit 0 - fi - - cert_conf_dir=${KUBE_CERTS_PATH}/conf - - mkdir -p ${cert_conf_dir} - - CA_CERT=${KUBE_CERTS_PATH}/ca.pem - CLIENT_CERT=${KUBE_CERTS_PATH}/worker.pem - CLIENT_CSR=${KUBE_CERTS_PATH}/worker.csr - CLIENT_KEY=${KUBE_CERTS_PATH}/worker-key.pem - - if [ -f ${CLIENT_CERT} ] || [ -f ${CLIENT_KEY} ] || [ -f ${CLIENT_CSR} ]; then - exit 0 - fi - - #Get a token by user credentials and trust - cat > auth.json << EOF - { - "auth": { - "identity": { - "methods": [ - "password" - ], - "password": { - "user": { - "id": "$TRUSTEE_USER_ID", - "password": "$TRUSTEE_PASSWORD" - } - } - } - } - } - EOF - - USER_TOKEN=`curl -k -s -i -X POST -H "Content-Type: application/json" -d @auth.json \ - $AUTH_URL/auth/tokens | grep X-Subject-Token | awk '{print $2}' | tr -d '\r'` - - rm -rf auth.json - - ca_cert_json=$(curl -k -X GET \ - -H "X-Auth-Token: $USER_TOKEN" \ - -H "OpenStack-API-Version: container-infra latest" \ - $MAGNUM_URL/certificates/$CLUSTER_UUID) - parse_json_response "${ca_cert_json}" > ${CA_CERT} - - # Create config for client's csr - cat > ${cert_conf_dir}/worker-openssl.conf < ${CLIENT_CERT} - - chmod 600 ${KUBE_CERTS_PATH}/*-key.pem - chown root:root ${KUBE_CERTS_PATH}/*-key.pem diff --git 
a/magnum/drivers/k8s_coreos_v1/templates/fragments/make-cert.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/make-cert.yaml deleted file mode 100644 index 07daf2d9..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/make-cert.yaml +++ /dev/null @@ -1,154 +0,0 @@ -#cloud-config -write_files: - - path: /etc/systemd/system/make-cert.service - owner: "root:root" - permissions: "0644" - content: | - [Unit] - Description=Make TLS certificates - - [Service] - Type=oneshot - EnvironmentFile=/etc/sysconfig/heat-params - ExecStart=/etc/sysconfig/make-cert.sh - - [Install] - WantedBy=multi-user.target - - - path: /etc/sysconfig/make-cert.sh - owner: "root:root" - permissions: "0755" - content: | - #!/bin/bash - - # Parse the JSON response that contains the TLS certificate, and print - # out the certificate content. - function parse_json_response { - json_response=$1 - # {..,"pem": "ABCD",..} -> ABCD - key=$(echo "$json_response" | sed 's/^.*"pem": "\([^"]*\)".*$/\1/') - # decode newline characters - key=$(echo "$key" | sed 's/\\n/\n/g') - echo "$key" - } - - set -o errexit - set -o nounset - set -o pipefail - - if [ "$TLS_DISABLED" == "True" ]; then - exit 0 - fi - - if [[ -z "${KUBE_NODE_PUBLIC_IP}" ]]; then - KUBE_NODE_PUBLIC_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) - fi - if [[ -z "${KUBE_NODE_IP}" ]]; then - KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) - fi - - sans="IP:${KUBE_NODE_PUBLIC_IP},IP:${KUBE_NODE_IP}" - if [ "${KUBE_NODE_PUBLIC_IP}" != "${KUBE_API_PUBLIC_ADDRESS}" ] \ - && [ -n "${KUBE_API_PUBLIC_ADDRESS}" ]; then - - sans="${sans},IP:${KUBE_API_PUBLIC_ADDRESS}" - fi - if [ "${KUBE_NODE_IP}" != "${KUBE_API_PRIVATE_ADDRESS}" ] \ - && [ -n "${KUBE_API_PRIVATE_ADDRESS}" ]; then - sans="${sans},IP:${KUBE_API_PRIVATE_ADDRESS}" - fi - MASTER_HOSTNAME=${MASTER_HOSTNAME:-} - if [[ -n "${MASTER_HOSTNAME}" ]]; then - sans="${sans},DNS:${MASTER_HOSTNAME}" - fi - sans="${sans},IP:127.0.0.1" - - KUBE_SERVICE_IP=$(echo $PORTAL_NETWORK_CIDR | awk 'BEGIN{FS="[./]"; OFS="."}{print $1,$2,$3,$4 + 1}') - - sans="${sans},IP:${KUBE_SERVICE_IP}" - - if [[ -n "${ETCD_LB_VIP}" ]]; then - sans="${sans},IP:${ETCD_LB_VIP}" - fi - - cert_conf_dir=${KUBE_CERTS_PATH}/conf - - mkdir -p ${cert_conf_dir} - - CA_CERT=${KUBE_CERTS_PATH}/ca.pem - SERVER_CERT=${KUBE_CERTS_PATH}/apiserver.pem - SERVER_CSR=${KUBE_CERTS_PATH}/apiserver.csr - SERVER_KEY=${KUBE_CERTS_PATH}/apiserver-key.pem - - if [ -f ${SERVER_CERT} ] || [ -f ${SERVER_KEY} ] || [ -f ${SERVER_CSR} ]; then - exit 0 - fi - - # Get a token by user credentials and trust - cat > auth.json << EOF - { - "auth": { - "identity": { - "methods": [ - "password" - ], - "password": { - "user": { - "id": "$TRUSTEE_USER_ID", - "password": "$TRUSTEE_PASSWORD" - } - } - } - } - } - EOF - - USER_TOKEN=`curl -k -s -i -X POST -H "Content-Type: application/json" -d @auth.json \ - $AUTH_URL/auth/tokens | grep X-Subject-Token | awk '{print $2}' | tr -d '\r'` - - rm -rf auth.json - - # Get CA certificate for this cluster - ca_cert_json=$(curl -k -X GET \ - -H "X-Auth-Token: $USER_TOKEN" \ - -H "OpenStack-API-Version: container-infra latest" \ - $MAGNUM_URL/certificates/$CLUSTER_UUID) - parse_json_response "${ca_cert_json}" > ${CA_CERT} - - # Create config for server's csr - cat > ${cert_conf_dir}/openssl.cnf < ${SERVER_CERT} - - chmod 600 ${KUBE_CERTS_PATH}/*-key.pem - # Certs will also be used by etcd service - chown -R etcd:etcd ${KUBE_CERTS_PATH} diff --git
a/magnum/drivers/k8s_coreos_v1/templates/fragments/wc-notify.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/wc-notify.yaml deleted file mode 100644 index 6d8a2956..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/wc-notify.yaml +++ /dev/null @@ -1,24 +0,0 @@ -#cloud-config -write_files: - - path: /etc/systemd/system/wc-notify.service - owner: "root:root" - permissions: "0644" - content: | - [Unit] - Description=Notify Heat - - [Service] - Type=oneshot - EnvironmentFile=/etc/sysconfig/heat-params - ExecStart=/etc/sysconfig/wc-notify.sh - - [Install] - WantedBy=multi-user.target - - - path: /etc/sysconfig/wc-notify.sh - owner: "root:root" - permissions: "0755" - content: | - #!/bin/bash -v - command="$WAIT_CURL --insecure --data-binary '{\"status\": \"SUCCESS\"}'" - eval $(echo "$command") diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params-master.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params-master.yaml deleted file mode 100644 index d738795c..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params-master.yaml +++ /dev/null @@ -1,50 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -write_files: - - path: /etc/sysconfig/heat-params - owner: "root:root" - permissions: "0600" - content: | - KUBE_API_PUBLIC_ADDRESS="$KUBE_API_PUBLIC_ADDRESS" - KUBE_API_PRIVATE_ADDRESS="$KUBE_API_PRIVATE_ADDRESS" - KUBE_API_PORT="$KUBE_API_PORT" - KUBE_NODE_PUBLIC_IP="$KUBE_NODE_PUBLIC_IP" - KUBE_NODE_IP="$KUBE_NODE_IP" - KUBE_ALLOW_PRIV="$KUBE_ALLOW_PRIV" - DOCKER_VOLUME="$DOCKER_VOLUME" - DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER" - NETWORK_DRIVER="$NETWORK_DRIVER" - FLANNEL_NETWORK_CIDR="$FLANNEL_NETWORK_CIDR" - FLANNEL_NETWORK_SUBNETLEN="$FLANNEL_NETWORK_SUBNETLEN" - FLANNEL_BACKEND="$FLANNEL_BACKEND" - PORTAL_NETWORK_CIDR="$PORTAL_NETWORK_CIDR" - ADMISSION_CONTROL_LIST="$ADMISSION_CONTROL_LIST" - ETCD_DISCOVERY_URL="$ETCD_DISCOVERY_URL" - USERNAME="$USERNAME" - PASSWORD="$PASSWORD" - TENANT_NAME="$TENANT_NAME" - CLUSTER_SUBNET="$CLUSTER_SUBNET" - TLS_DISABLED="$TLS_DISABLED" - CLUSTER_UUID="$CLUSTER_UUID" - MAGNUM_URL="$MAGNUM_URL" - HTTP_PROXY="$HTTP_PROXY" - HTTPS_PROXY="$HTTPS_PROXY" - NO_PROXY="$NO_PROXY" - WAIT_CURL="$WAIT_CURL" - KUBE_VERSION="$KUBE_VERSION" - TRUSTEE_USER_ID="$TRUSTEE_USER_ID" - TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" - TRUST_ID="$TRUST_ID" - AUTH_URL="$AUTH_URL" - INSECURE_REGISTRY_URL="$INSECURE_REGISTRY_URL" - SYSTEM_PODS_INITIAL_DELAY="$SYSTEM_PODS_INITIAL_DELAY" - SYSTEM_PODS_TIMEOUT="$SYSTEM_PODS_TIMEOUT" - KUBE_CERTS_PATH="$KUBE_CERTS_PATH" - HOST_CERTS_PATH="$HOST_CERTS_PATH" - HYPERKUBE_IMAGE_REPO="$HYPERKUBE_IMAGE_REPO" - CONTAINER_RUNTIME="$CONTAINER_RUNTIME" - ETCD_LB_VIP="$ETCD_LB_VIP" - KUBE_DASHBOARD_ENABLED="$KUBE_DASHBOARD_ENABLED" - KUBE_DASHBOARD_VERSION="$KUBE_DASHBOARD_VERSION" - DNS_SERVICE_IP="$DNS_SERVICE_IP" - DNS_CLUSTER_DOMAIN="$DNS_CLUSTER_DOMAIN" diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params.yaml deleted file mode 100644 index 8eb8e025..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params.yaml +++ /dev/null @@ -1,48 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -write_files: - - path: /etc/sysconfig/heat-params - owner: "root:root" - permissions: "0600" - content: | - KUBE_ALLOW_PRIV="$KUBE_ALLOW_PRIV" - KUBE_MASTER_IP="$KUBE_MASTER_IP" - KUBE_API_PORT="$KUBE_API_PORT" - 
KUBE_NODE_PUBLIC_IP="$KUBE_NODE_PUBLIC_IP" - KUBE_NODE_IP="$KUBE_NODE_IP" - ETCD_SERVER_IP="$ETCD_SERVER_IP" - DOCKER_VOLUME="$DOCKER_VOLUME" - DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER" - NETWORK_DRIVER="$NETWORK_DRIVER" - REGISTRY_ENABLED="$REGISTRY_ENABLED" - REGISTRY_PORT="$REGISTRY_PORT" - SWIFT_REGION="$SWIFT_REGION" - REGISTRY_CONTAINER="$REGISTRY_CONTAINER" - REGISTRY_INSECURE="$REGISTRY_INSECURE" - REGISTRY_CHUNKSIZE="$REGISTRY_CHUNKSIZE" - TLS_DISABLED="$TLS_DISABLED" - CLUSTER_UUID="$CLUSTER_UUID" - MAGNUM_URL="$MAGNUM_URL" - AUTH_URL="$AUTH_URL" - USERNAME="$USERNAME" - PASSWORD="$PASSWORD" - VOLUME_DRIVER="$VOLUME_DRIVER" - REGION_NAME="$REGION_NAME" - TENANT_NAME="$TENANT_NAME" - HTTP_PROXY="$HTTP_PROXY" - HTTPS_PROXY="$HTTPS_PROXY" - NO_PROXY="$NO_PROXY" - WAIT_CURL="$WAIT_CURL" - KUBE_VERSION="$KUBE_VERSION" - TRUSTEE_USER_ID="$TRUSTEE_USER_ID" - TRUSTEE_USERNAME="$TRUSTEE_USERNAME" - TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" - TRUSTEE_DOMAIN_ID="$TRUSTEE_DOMAIN_ID" - TRUST_ID="$TRUST_ID" - INSECURE_REGISTRY_URL="$INSECURE_REGISTRY_URL" - KUBE_CERTS_PATH="$KUBE_CERTS_PATH" - HOST_CERTS_PATH="$HOST_CERTS_PATH" - HYPERKUBE_IMAGE_REPO="$HYPERKUBE_IMAGE_REPO" - CONTAINER_RUNTIME="$CONTAINER_RUNTIME" - DNS_SERVICE_IP="$DNS_SERVICE_IP" - DNS_CLUSTER_DOMAIN="$DNS_CLUSTER_DOMAIN" diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-kubeconfig.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-kubeconfig.yaml deleted file mode 100644 index c6661bbb..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-kubeconfig.yaml +++ /dev/null @@ -1,24 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -write_files: - - path: /etc/kubernetes/config/worker-kubeconfig.yaml - owner: "root:root" - permissions: "0644" - content: | - apiVersion: v1 - kind: Config - clusters: - - name: local - cluster: - certificate-authority: /etc/kubernetes/ssl/ca.pem - users: - - name: kubelet - user: - client-certificate: /etc/kubernetes/ssl/worker.pem - client-key: /etc/kubernetes/ssl/worker-key.pem - contexts: - - context: - cluster: local - user: kubelet - name: kubelet-context - current-context: kubelet-context diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-network-config.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-network-config.yaml deleted file mode 100644 index 49ede2bf..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-network-config.yaml +++ /dev/null @@ -1,45 +0,0 @@ -#cloud-config -write_files: - - path: /etc/systemd/system/write-network-config.service - owner: "root:root" - permissions: "0644" - content: | - [Unit] - Description=Write Network Config - - [Service] - Type=oneshot - EnvironmentFile=/etc/sysconfig/heat-params - ExecStart=/etc/sysconfig/write-network-config.sh - - [Install] - WantedBy=multi-user.target - - - path: /etc/sysconfig/write-network-config.sh - owner: "root:root" - permissions: "0755" - content: | - #!/bin/sh - - if [ "$NETWORK_DRIVER" != "flannel" ]; then - exit 0 - fi - - FLANNEL_JSON=/etc/sysconfig/flannel-network.json - cat > $FLANNEL_JSON < - This template will boot a coreos cluster with one or more minions (as - specified by the number_of_minions parameter, which defaults to 1) and one - master node. Allowing multiple masters is a work in progress. 
- -parameters: - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on the servers - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - default: public - - fixed_network: - type: string - description: uuid/name of an existing network to use to provision machines - default: "" - - fixed_subnet: - type: string - description: uuid/name of an existing subnet to use to provision machines - default: "" - - server_image: - type: string - default: CoreOS - description: glance image used to boot the servers - - master_flavor: - type: string - default: m1.small - description: flavor to use when booting the server for master node - - minion_flavor: - type: string - default: m1.small - description: flavor to use when booting the servers for minions - - prometheus_monitoring: - type: boolean - default: false - description: > - whether or not to have the grafana-prometheus-cadvisor monitoring setup - - grafana_admin_passwd: - type: string - default: admin - hidden: true - description: > - admin user password for the Grafana monitoring interface - - discovery_url: - type: string - description: > - Discovery URL used for bootstrapping the etcd cluster. - - dns_nameserver: - type: string - description: address of a dns nameserver reachable in your environment - default: 8.8.8.8 - - number_of_masters: - type: number - description: how many kubernetes masters to spawn - default: 1 - - number_of_minions: - type: number - description: how many kubernetes minions to spawn - default: 1 - - fixed_network_cidr: - type: string - description: network range for fixed ip network - default: 10.0.0.0/24 - - portal_network_cidr: - type: string - description: > - address range used by kubernetes for service portals - default: 10.254.0.0/16 - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - default: 10.100.0.0/16 - - flannel_network_subnetlen: - type: number - description: size of subnet assigned to each minion - default: 24 - - flannel_backend: - type: string - description: > - specify the backend for flannel; the default is the host-gw backend - default: "host-gw" - constraints: - - allowed_values: ["udp", "vxlan", "host-gw"] - - system_pods_initial_delay: - type: number - description: > - health check, time to wait for system pods (podmaster, scheduler) to boot - (in seconds) - default: 30 - - system_pods_timeout: - type: number - description: > - health check, timeout for system pods (podmaster, scheduler) to answer. - (in seconds) - default: 5 - - admission_control_list: - type: string - description: > - List of admission control plugins to activate - default: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers. - default: "true" - constraints: - - allowed_values: ["true", "false"] - - minions_to_remove: - type: comma_delimited_list - description: > - List of minions to be removed when doing an update. An individual minion may - be referenced in several ways: (1) The resource name (e.g. ['1', '3']), - (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should - be empty when doing a create.
- default: [] - - network_driver: - type: string - description: network driver to use for instantiating container networks - default: flannel - - tls_disabled: - type: boolean - description: whether or not to disable TLS - default: False - - kube_dashboard_enabled: - type: boolean - description: whether or not to enable the kubernetes dashboard - default: True - - loadbalancing_protocol: - type: string - description: > - The protocol which is used for load balancing. If you want to change - tls_disabled option to 'True', please change this to "HTTP". - default: TCP - constraints: - - allowed_values: ["TCP", "HTTP"] - - kubernetes_port: - type: number - description: > - The port which is used by kube-apiserver to provide the Kubernetes - service. - default: 6443 - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - http_proxy: - type: string - description: http proxy address for docker - default: "" - - https_proxy: - type: string - description: https proxy address for docker - default: "" - - no_proxy: - type: string - description: no proxies for docker - default: "" - - trustee_domain_id: - type: string - description: domain id of the trustee - default: "" - - trustee_user_id: - type: string - description: user id of the trustee - default: "" - - trustee_username: - type: string - description: username of the trustee - default: "" - - trustee_password: - type: string - description: password of the trustee - default: "" - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - default: "" - hidden: true - - auth_url: - type: string - description: url for keystone - - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - default: v1.6.2_coreos.0 - - kube_dashboard_version: - type: string - description: version of kubernetes dashboard used for kubernetes cluster - default: v1.5.1 - - hyperkube_image: - type: string - description: > - Docker registry used for hyperkube image - default: quay.io/coreos/hyperkube - - registry_enabled: - type: boolean - description: > - Indicates whether the docker registry is enabled. - default: false - - registry_port: - type: number - description: port of registry service - default: 5000 - - wait_condition_timeout: - type: number - description: > - timeout for the Wait Conditions - default: 6000 - - insecure_registry_url: - type: string - description: insecure registry url - constraints: - - allowed_pattern: "^$|.*/" - default: "" - - container_runtime: - type: string - description: > - Container runtime to use with Kubernetes. - default: "docker" - constraints: - - allowed_values: ["docker"] - - dns_service_ip: - type: string - description: > - address used by Kubernetes DNS service - default: 10.254.0.10 - - dns_cluster_domain: - type: string - description: > - domain name for cluster DNS - default: "cluster.local" - -resources: - - ###################################################################### - # - # network resources. allocate a network and router for our server.
- # Important: the Load Balancer feature in Kubernetes requires that - # the name for the fixed_network must be "private" for the - # address lookup in Kubernetes to work properly - # - - network: - type: ../../common/templates/network.yaml - properties: - existing_network: {get_param: fixed_network} - existing_subnet: {get_param: fixed_subnet} - private_network_cidr: {get_param: fixed_network_cidr} - dns_nameserver: {get_param: dns_nameserver} - external_network: {get_param: external_network} - private_network_name: private - - api_lb: - type: ../../common/templates/lb.yaml - properties: - fixed_subnet: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - protocol: {get_param: loadbalancing_protocol} - port: {get_param: kubernetes_port} - - etcd_lb: - type: ../../common/templates/lb.yaml - properties: - fixed_subnet: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - protocol: {get_param: loadbalancing_protocol} - port: 2379 - - ###################################################################### - # - # security groups. we need to permit network traffic of various - # sorts. - # - - secgroup_master: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - protocol: tcp - port_range_min: 7080 - port_range_max: 7080 - - protocol: tcp - port_range_min: 8080 - port_range_max: 8080 - - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - - protocol: tcp - port_range_min: 6443 - port_range_max: 6443 - - secgroup_minion_all_open: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - - protocol: udp - - ###################################################################### - # - # resources that expose the IPs of either the kube master or a given - # LBaaS pool depending on whether LBaaS is enabled for the cluster. - # - - api_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_attr: [api_lb, floating_address]} - pool_private_ip: {get_attr: [api_lb, address]} - master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} - master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} - - etcd_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_private_ip: {get_attr: [etcd_lb, address]} - master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} - - ###################################################################### - # - # resources that expose the IPs of either floating ip or a given - # fixed ip depending on whether FloatingIP is enabled for the cluster. - # - - api_address_floating_switch: - type: Magnum::FloatingIPAddressSwitcher - properties: - public_ip: {get_attr: [api_address_lb_switch, public_ip]} - private_ip: {get_attr: [api_address_lb_switch, private_ip]} - - ###################################################################### - # - # kubernetes masters. This is a resource group that will create - # the master nodes.
- # - - kube_masters: - type: OS::Heat::ResourceGroup - depends_on: - - network - properties: - count: {get_param: number_of_masters} - resource_def: - type: kubemaster.yaml - properties: - api_public_address: {get_attr: [api_lb, floating_address]} - api_private_address: {get_attr: [api_lb, address]} - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - master_flavor: {get_param: master_flavor} - external_network: {get_param: external_network} - kube_allow_priv: {get_param: kube_allow_priv} - flannel_network_cidr: {get_param: flannel_network_cidr} - flannel_network_subnetlen: {get_param: flannel_network_subnetlen} - flannel_backend: {get_param: flannel_backend} - system_pods_initial_delay: {get_param: system_pods_initial_delay} - system_pods_timeout: {get_param: system_pods_timeout} - portal_network_cidr: {get_param: portal_network_cidr} - admission_control_list: {get_param: admission_control_list} - fixed_network: {get_attr: [network, fixed_network]} - fixed_subnet: {get_attr: [network, fixed_subnet]} - discovery_url: {get_param: discovery_url} - network_driver: {get_param: network_driver} - kubernetes_port: {get_param: kubernetes_port} - tls_disabled: {get_param: tls_disabled} - kube_dashboard_enabled: {get_param: kube_dashboard_enabled} - secgroup_kube_master_id: {get_resource: secgroup_master} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - kube_version: {get_param: kube_version} - kube_dashboard_version: {get_param: kube_dashboard_version} - wait_condition_timeout: {get_param: wait_condition_timeout} - cluster_uuid: {get_param: cluster_uuid} - api_pool_id: {get_attr: [api_lb, pool_id]} - etcd_pool_id: {get_attr: [etcd_lb, pool_id]} - magnum_url: {get_param: magnum_url} - trustee_user_id: {get_param: trustee_user_id} - trustee_password: {get_param: trustee_password} - trust_id: {get_param: trust_id} - auth_url: {get_param: auth_url} - hyperkube_image: {get_param: hyperkube_image} - insecure_registry_url: {get_param: insecure_registry_url} - container_runtime: {get_param: container_runtime} - prometheus_monitoring: {get_param: prometheus_monitoring} - grafana_admin_passwd: {get_param: grafana_admin_passwd} - etcd_lb_vip: {get_attr: [etcd_lb, address]} - dns_service_ip: {get_param: dns_service_ip} - dns_cluster_domain: {get_param: dns_cluster_domain} - - ###################################################################### - # - # kubernetes minions. This is a resource group that will initially - # create minions, and needs to be manually scaled. 
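Because the minion group below is not auto-scaling, resizing is driven from the Magnum API: a cluster update rewrites number_of_minions on the stack, and on scale-down the minions_to_remove parameter feeds the removal_policies shown below so that specific members are culled. A sketch of what that looks like from the CLI, assuming the python-magnumclient OSC plugin (cluster name illustrative):

    # Scale the cluster to 3 worker nodes; Magnum performs a Heat stack
    # update with the new count, and removal_policies decides which
    # ResourceGroup members are dropped on scale-down.
    openstack coe cluster update k8s-coreos-cluster replace node_count=3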
- # - - kube_minions: - type: OS::Heat::ResourceGroup - depends_on: - - network - - kube_masters - properties: - count: {get_param: number_of_minions} - removal_policies: [{resource_list: {get_param: minions_to_remove}}] - resource_def: - type: kubeminion.yaml - properties: - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - minion_flavor: {get_param: minion_flavor} - fixed_network: {get_attr: [network, fixed_network]} - fixed_subnet: {get_attr: [network, fixed_subnet]} - flannel_network_cidr: {get_param: flannel_network_cidr} - kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]} - etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} - external_network: {get_param: external_network} - kube_allow_priv: {get_param: kube_allow_priv} - network_driver: {get_param: network_driver} - kubernetes_port: {get_param: kubernetes_port} - tls_disabled: {get_param: tls_disabled} - secgroup_kube_minion_id: {get_resource: secgroup_minion_all_open} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - kube_version: {get_param: kube_version} - wait_condition_timeout: {get_param: wait_condition_timeout} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} - trustee_user_id: {get_param: trustee_user_id} - trustee_password: {get_param: trustee_password} - trust_id: {get_param: trust_id} - auth_url: {get_param: auth_url} - hyperkube_image: {get_param: hyperkube_image} - insecure_registry_url: {get_param: insecure_registry_url} - container_runtime: {get_param: container_runtime} - prometheus_monitoring: {get_param: prometheus_monitoring} - dns_service_ip: {get_param: dns_service_ip} - dns_cluster_domain: {get_param: dns_cluster_domain} - -outputs: - - api_address: - value: - str_replace: - template: api_ip_address - params: - api_ip_address: {get_attr: [api_address_floating_switch, ip_address]} - description: > - This is the API endpoint of the Kubernetes cluster. Use this to access - the Kubernetes API. - - kube_masters_private: - value: {get_attr: [kube_masters, kube_master_ip]} - description: > - This is a list of the "private" IP addresses of all the Kubernetes masters. - - kube_masters: - value: {get_attr: [kube_masters, kube_master_external_ip]} - description: > - This is a list of the "public" IP addresses of all the Kubernetes masters. - Use these IP addresses to log in to the Kubernetes masters via ssh or to access - the Kubernetes API. - - kube_minions_private: - value: {get_attr: [kube_minions, kube_minion_ip]} - description: > - This is a list of the "private" IP addresses of all the Kubernetes minions. - - kube_minions: - value: {get_attr: [kube_minions, kube_minion_external_ip]} - description: > - This is a list of the "public" IP addresses of all the Kubernetes minions. - Use these IP addresses to log in to the Kubernetes minions via ssh. diff --git a/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml b/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml deleted file mode 100644 index 8a8ca6e0..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml +++ /dev/null @@ -1,531 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a Kubernetes master. This stack is - included by a ResourceGroup resource in the parent template - (kubecluster.yaml).
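Since each ResourceGroup member is itself a nested stack built from this file, the per-master stacks can be inspected with heatclient once a cluster is deployed; a sketch, assuming the heatclient OSC plugin (the stack name is illustrative):

    # List resources two levels deep so the nested kubemaster.yaml stacks
    # appear alongside the top-level cluster resources.
    openstack stack resource list -n 2 k8s-coreos-cluster-abcd1234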
- -parameters: - - server_image: - type: string - description: glance image used to boot the server - - master_flavor: - type: string - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - discovery_url: - type: string - description: > - Discovery URL used for bootstrapping the etcd cluster. - - api_pool_id: - type: string - description: ID of the load balancer pool of k8s API server. - - etcd_pool_id: - type: string - description: ID of the load balancer pool of etcd server. - - portal_network_cidr: - type: string - description: > - address range used by kubernetes for service portals - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers. - constraints: - - allowed_values: ["true", "false"] - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - - flannel_network_subnetlen: - type: number - description: size of subnet assigned to each master - - flannel_backend: - type: string - description: > - specify the backend for flannel - constraints: - - allowed_values: ["udp", "vxlan", "host-gw"] - - system_pods_initial_delay: - type: number - description: > - health check, time to wait for system pods (podmaster, scheduler) to boot - (in seconds) - default: 30 - - system_pods_timeout: - type: number - description: > - health check, timeout for system pods (podmaster, scheduler) to answer. - (in seconds) - default: 5 - - admission_control_list: - type: string - description: > - List of admission control plugins to activate - - fixed_network: - type: string - description: Network from which to allocate fixed addresses. - - fixed_subnet: - type: string - description: Subnet from which to allocate fixed addresses. - - wait_condition_timeout: - type: number - description: > - timeout for the Wait Conditions - - secgroup_kube_master_id: - type: string - description: ID of the security group for kubernetes master. - - network_driver: - type: string - description: network driver to use for instantiating container networks - - tls_disabled: - type: boolean - description: whether or not to disable TLS - - kube_dashboard_enabled: - type: boolean - description: whether or not to enable the kubernetes dashboard - - kubernetes_port: - type: number - description: > - The port which is used by kube-apiserver to provide the Kubernetes - service. - default: 6443 - - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - - kube_dashboard_version: - type: string - description: version of kubernetes dashboard used for kubernetes cluster - - hyperkube_image: - type: string - description: > - Docker registry used for hyperkube image - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - prometheus_monitoring: - type: boolean - description: > - whether or not to have prometheus and grafana deployed - - grafana_admin_passwd: - type: string - hidden: true - description: > - admin user password for the Grafana monitoring interface - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - api_public_address: - type: string - description: Public IP address of the Kubernetes master server.
- default: "" - - api_private_address: - type: string - description: Private IP address of the Kubernetes master server. - default: "" - - http_proxy: - type: string - description: http proxy address for docker - - https_proxy: - type: string - description: https proxy address for docker - - no_proxy: - type: string - description: no proxies for docker - - trustee_user_id: - type: string - description: user id of the trustee - default: "" - - trustee_password: - type: string - description: password of the trustee - default: "" - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - default: "" - hidden: true - - auth_url: - type: string - description: url for keystone - - insecure_registry_url: - type: string - description: insecure registry url - - container_runtime: - type: string - description: > - Container runtime to use with Kubernetes. - - etcd_lb_vip: - type: string - description: > - private VIP of the etcd load balancer, used to generate certs on the master. - default: "" - - dns_service_ip: - type: string - description: > - address used by Kubernetes DNS service - - dns_cluster_domain: - type: string - description: > - domain name for cluster DNS - -resources: - - master_wait_handle: - type: OS::Heat::WaitConditionHandle - - master_wait_condition: - type: OS::Heat::WaitCondition - depends_on: kube-master - properties: - handle: {get_resource: master_wait_handle} - timeout: {get_param: wait_condition_timeout} - - ###################################################################### - # - # resource that exposes the IPs of either the kube master or the API - # LBaaS pool depending on whether LBaaS is enabled for the cluster. - # - - api_address_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_param: api_public_address} - pool_private_ip: {get_param: api_private_address} - master_public_ip: {get_attr: [kube_master_floating, floating_ip_address]} - master_private_ip: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive.
- # - - write_heat_params: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/write-heat-params-master.yaml} - params: - "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]} - "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]} - "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} - "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} - "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} - "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} - "$FLANNEL_BACKEND": {get_param: flannel_backend} - "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} - "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} - "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} - "$ADMISSION_CONTROL_LIST": {get_param: admission_control_list} - "$CLUSTER_SUBNET": {get_param: fixed_subnet} - "$ETCD_DISCOVERY_URL": {get_param: discovery_url} - "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} - "$NETWORK_DRIVER": {get_param: network_driver} - "$KUBE_API_PORT": {get_param: kubernetes_port} - "$TLS_DISABLED": {get_param: tls_disabled} - "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled} - "$KUBE_VERSION": {get_param: kube_version} - "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version} - "$CLUSTER_UUID": {get_param: cluster_uuid} - "$MAGNUM_URL": {get_param: magnum_url} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$TRUSTEE_USER_ID": {get_param: trustee_user_id} - "$TRUSTEE_PASSWORD": {get_param: trustee_password} - "$TRUST_ID": {get_param: trust_id} - "$AUTH_URL": {get_param: auth_url} - "$KUBE_CERTS_PATH": "/etc/kubernetes/ssl" - "$HOST_CERTS_PATH": "/usr/share/ca-certificates" - "$HYPERKUBE_IMAGE_REPO": - str_replace: - template: insecure_registry_urlhyperkube_image - params: - insecure_registry_url: { get_param: insecure_registry_url } - hyperkube_image: { get_param: hyperkube_image } - "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} - "$CONTAINER_RUNTIME": {get_param: container_runtime} - "$ETCD_LB_VIP": {get_param: etcd_lb_vip} - "$DNS_SERVICE_IP": {get_param: dns_service_ip} - "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain} - - configure_etcd: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/configure-etcd.yaml} - - make_cert: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/make-cert.yaml} - - write_network_config: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/write-network-config.yaml} - - enable_network_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/enable-network-service.yaml} - - enable_kubelet: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/enable-kubelet-master.yaml} - - enable_kube_apiserver: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/enable-kube-apiserver.yaml} - - create_kube_namespace: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/create-kube-namespace.yaml} - - enable_kube_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: 
fragments/enable-kube-proxy-master.yaml} - - enable_kube_controller_manager: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/enable-kube-controller-manager.yaml} - - enable_kube_scheduler: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/enable-kube-scheduler.yaml} - - enable_kube_dashboard: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/enable-kube-dashboard.yaml} - - wc_notify: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/wc-notify.yaml} - - add_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/add-proxy.yaml} - - configure_docker: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/configure-docker.yaml} - - enable_coredns: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/enable-coredns.yaml} - - kube_master_init: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: | - $write_heat_params - $make_cert - $configure_docker - $add_proxy - $configure_etcd - $write_network_config - $enable_network_service - $enable_kubelet - $enable_kube_apiserver - $create_kube_namespace - $enable_kube_proxy - $enable_kube_controller_manager - $enable_kube_scheduler - $enable_kube_dashboard - $enable_coredns - $wc_notify - coreos: - units: - - name: "make-cert.service" - command: "start" - - name: "configure-docker.service" - command: "start" - - name: "add-proxy.service" - command: "start" - - name: "configure-etcd.service" - command: "start" - - name: "write-network-config.service" - command: "start" - - name: "enable-network-service.service" - command: "start" - - name: "enable-kubelet.service" - command: "start" - - name: "enable-kube-apiserver.service" - command: "start" - - name: "create-kube-namespace.service" - command: "start" - - name: "enable-kube-proxy.service" - command: "start" - - name: "enable-kube-controller-manager.service" - command: "start" - - name: "enable-kube-scheduler.service" - command: "start" - - name: "enable-kube-dashboard.service" - command: "start" - - name: "enable-coredns.service" - command: "start" - - name: "wc-notify.service" - command: "start" - params: - "$write_heat_params": {get_attr: [write_heat_params, config]} - "$make_cert": {get_attr: [make_cert, config]} - "$configure_docker": {get_attr: [configure_docker, config]} - "$add_proxy": {get_attr: [add_proxy, config]} - "$configure_etcd": {get_attr: [configure_etcd, config]} - "$write_network_config": {get_attr: [write_network_config, config]} - "$enable_network_service": {get_attr: [enable_network_service, config]} - "$enable_kubelet": {get_attr: [enable_kubelet, config]} - "$enable_kube_apiserver": {get_attr: [enable_kube_apiserver, config]} - "$create_kube_namespace": {get_attr: [create_kube_namespace, config]} - "$enable_kube_proxy": {get_attr: [enable_kube_proxy, config]} - "$enable_kube_controller_manager": {get_attr: [enable_kube_controller_manager, config]} - "$enable_kube_scheduler": {get_attr: [enable_kube_scheduler, config]} - "$enable_kube_dashboard": {get_attr: [enable_kube_dashboard, config]} - "$enable_coredns": {get_attr: [enable_coredns, config]} - "$wc_notify": {get_attr: [wc_notify, config]} - - ###################################################################### - # - # a single kubernetes master. 
- # - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - kube-master: - type: OS::Nova::Server - properties: - image: {get_param: server_image} - flavor: {get_param: master_flavor} - key_name: {get_param: ssh_key_name} - user_data_format: RAW - user_data: {get_resource: kube_master_init} - networks: - - port: {get_resource: kube_master_eth0} - - kube_master_eth0: - type: OS::Neutron::Port - properties: - network: {get_param: fixed_network} - security_groups: - - {get_param: secgroup_kube_master_id} - fixed_ips: - - subnet: {get_param: fixed_subnet} - allowed_address_pairs: - - ip_address: {get_param: flannel_network_cidr} - replacement_policy: AUTO - - kube_master_floating: - type: Magnum::Optional::KubeMaster::Neutron::FloatingIP - properties: - floating_network: {get_param: external_network} - port_id: {get_resource: kube_master_eth0} - - api_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: api_pool_id} - address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - subnet: { get_param: fixed_subnet } - protocol_port: {get_param: kubernetes_port} - - etcd_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: etcd_pool_id} - address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - subnet: { get_param: fixed_subnet } - protocol_port: 2379 - -outputs: - - kube_master_ip: - value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - description: > - This is the "private" IP address of the Kubernetes master node. - - kube_master_external_ip: - value: {get_attr: [kube_master_floating, floating_ip_address]} - description: > - This is the "public" IP address of the Kubernetes master node. diff --git a/magnum/drivers/k8s_coreos_v1/templates/kubeminion.yaml b/magnum/drivers/k8s_coreos_v1/templates/kubeminion.yaml deleted file mode 100644 index 51296f15..00000000 --- a/magnum/drivers/k8s_coreos_v1/templates/kubeminion.yaml +++ /dev/null @@ -1,347 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a single Kubernetes minion, - based on a CoreOS cloud image. This stack is included by a ResourceGroup - resource in the parent template (kubecluster.yaml). - -parameters: - - server_image: - type: string - description: glance image used to boot the server - - minion_flavor: - type: string - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers. - constraints: - - allowed_values: ["true", "false"] - - network_driver: - type: string - description: network driver to use for instantiating container networks - - tls_disabled: - type: boolean - description: whether or not to disable TLS - - kubernetes_port: - type: number - description: > - The port which is used by kube-apiserver to provide the Kubernetes - service.
- default: 6443 - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - - hyperkube_image: - type: string - description: > - Docker registry used for hyperkube image - - kube_master_ip: - type: string - description: IP address of the Kubernetes master server. - - etcd_server_ip: - type: string - description: IP address of the Etcd server. - - fixed_network: - type: string - description: Network from which to allocate fixed addresses. - - fixed_subnet: - type: string - description: Subnet from which to allocate fixed addresses. - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - - wait_condition_timeout: - type: number - description: > - timeout for the Wait Conditions - - secgroup_kube_minion_id: - type: string - description: ID of the security group for kubernetes minion. - - http_proxy: - type: string - description: http proxy address for docker - - https_proxy: - type: string - description: https proxy address for docker - - no_proxy: - type: string - description: no proxies for docker - - trustee_user_id: - type: string - description: user id of the trustee - default: "" - - trustee_password: - type: string - description: password of the trustee - default: "" - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - default: "" - hidden: true - - auth_url: - type: string - description: url for keystone - - insecure_registry_url: - type: string - description: insecure registry url - - container_runtime: - type: string - description: > - Container runtime to use with Kubernetes. - - prometheus_monitoring: - type: boolean - description: > - whether or not to have the node-exporter running on the node - - dns_service_ip: - type: string - description: > - address used by Kubernetes DNS service - - dns_cluster_domain: - type: string - description: > - domain name for cluster DNS - -resources: - - minion_wait_handle: - type: OS::Heat::WaitConditionHandle - - minion_wait_condition: - type: OS::Heat::WaitCondition - depends_on: kube-minion - properties: - handle: {get_resource: minion_wait_handle} - timeout: {get_param: wait_condition_timeout} - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive. 
- # - - write_heat_params: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/write-heat-params.yaml} - params: - "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} - "$KUBE_MASTER_IP": {get_param: kube_master_ip} - "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_minion_floating, floating_ip_address]} - "$KUBE_NODE_IP": {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} - "$WAIT_CURL": {get_attr: [minion_wait_handle, curl_cli]} - "$KUBE_API_PORT": {get_param: kubernetes_port} - "$TLS_DISABLED": {get_param: tls_disabled} - "$NETWORK_DRIVER": {get_param: network_driver} - "$ETCD_SERVER_IP": {get_param: etcd_server_ip} - "$KUBE_VERSION": {get_param: kube_version} - "$CLUSTER_UUID": {get_param: cluster_uuid} - "$MAGNUM_URL": {get_param: magnum_url} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$TRUSTEE_USER_ID": {get_param: trustee_user_id} - "$TRUSTEE_PASSWORD": {get_param: trustee_password} - "$TRUST_ID": {get_param: trust_id} - "$AUTH_URL": {get_param: auth_url} - "$KUBE_CERTS_PATH": "/etc/kubernetes/ssl" - "$HOST_CERTS_PATH": "/usr/share/ca-certificates" - "$HYPERKUBE_IMAGE_REPO": - str_replace: - template: insecure_registry_urlhyperkube_image - params: - insecure_registry_url: { get_param: insecure_registry_url } - hyperkube_image: { get_param: hyperkube_image } - "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} - "$CONTAINER_RUNTIME": {get_param: container_runtime} - "$DNS_SERVICE_IP": {get_param: dns_service_ip} - "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain} - - write_kubeconfig: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/write-kubeconfig.yaml} - - make_cert: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/make-cert-client.yaml} - - enable_network_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/enable-network-service-client.yaml} - - enable_kubelet: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/enable-kubelet-minion.yaml} - - enable_kube_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/enable-kube-proxy-minion.yaml} - - wc_notify: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/wc-notify.yaml} - - add_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/add-proxy.yaml} - - configure_docker: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/configure-docker.yaml} - - kube_minion_init: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: | - $write_heat_params - $write_kubeconfig - $make_cert - $configure_docker - $add_proxy - $enable_network_service - $enable_kubelet - $enable_kube_proxy - $wc_notify - coreos: - units: - - name: "make-cert.service" - command: "start" - - name: "configure-docker.service" - command: "start" - - name: "add-proxy.service" - command: "start" - - name: "enable-network-service.service" - command: "start" - - name: "enable-kubelet.service" - command: "start" - - name: "enable-kube-proxy.service" - command: "start" - - name: "wc-notify.service" - command: "start" - params: - "$write_heat_params": {get_attr: [write_heat_params, config]} - 
"$write_kubeconfig": {get_attr: [write_kubeconfig, config]} - "$make_cert": {get_attr: [make_cert, config]} - "$configure_docker": {get_attr: [configure_docker, config]} - "$add_proxy": {get_attr: [add_proxy, config]} - "$enable_network_service": {get_attr: [enable_network_service, config]} - "$enable_kubelet": {get_attr: [enable_kubelet, config]} - "$enable_kube_proxy": {get_attr: [enable_kube_proxy, config]} - "$wc_notify": {get_attr: [wc_notify, config]} - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - kube-minion: - type: OS::Nova::Server - properties: - image: {get_param: server_image} - flavor: {get_param: minion_flavor} - key_name: {get_param: ssh_key_name} - user_data_format: RAW - user_data: {get_resource: kube_minion_init} - networks: - - port: {get_resource: kube_minion_eth0} - - kube_minion_eth0: - type: OS::Neutron::Port - properties: - network: {get_param: fixed_network} - security_groups: - - {get_param: secgroup_kube_minion_id} - fixed_ips: - - subnet: {get_param: fixed_subnet} - allowed_address_pairs: - - ip_address: {get_param: flannel_network_cidr} - replacement_policy: AUTO - - kube_minion_floating: - type: Magnum::Optional::KubeMinion::Neutron::FloatingIP - properties: - floating_network: {get_param: external_network} - port_id: {get_resource: kube_minion_eth0} - -outputs: - - kube_minion_ip: - value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} - description: > - This is the "private" IP address of the Kubernetes minion node. - - kube_minion_external_ip: - value: {get_attr: [kube_minion_floating, floating_ip_address]} - description: > - This is the "public" IP address of the Kubernetes minion node. - - OS::stack_id: - value: {get_param: "OS::stack_id"} - description: > - This is a id of the stack which creates from this template. diff --git a/magnum/drivers/k8s_coreos_v1/version.py b/magnum/drivers/k8s_coreos_v1/version.py deleted file mode 100644 index 0875afe1..00000000 --- a/magnum/drivers/k8s_coreos_v1/version.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 - Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version = '1.0.0' -driver = 'k8s_coreos_v1' -container_version = '1.11.2' diff --git a/magnum/drivers/k8s_fedora_atomic_v1/__init__.py b/magnum/drivers/k8s_fedora_atomic_v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/drivers/k8s_fedora_atomic_v1/driver.py b/magnum/drivers/k8s_fedora_atomic_v1/driver.py deleted file mode 100644 index 161cad47..00000000 --- a/magnum/drivers/k8s_fedora_atomic_v1/driver.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.drivers.common import k8s_monitor -from magnum.drivers.common.k8s_scale_manager import K8sScaleManager -from magnum.drivers.heat import driver -from magnum.drivers.k8s_fedora_atomic_v1 import template_def - - -class Driver(driver.HeatDriver): - - @property - def provides(self): - return [ - {'server_type': 'vm', - 'os': 'fedora-atomic', - 'coe': 'kubernetes'}, - ] - - def get_template_definition(self): - return template_def.AtomicK8sTemplateDefinition() - - def get_monitor(self, context, cluster): - return k8s_monitor.K8sMonitor(context, cluster) - - def get_scale_manager(self, context, osclient, cluster): - return K8sScaleManager(context, osclient, cluster) diff --git a/magnum/drivers/k8s_fedora_atomic_v1/template_def.py b/magnum/drivers/k8s_fedora_atomic_v1/template_def.py deleted file mode 100644 index afd99d12..00000000 --- a/magnum/drivers/k8s_fedora_atomic_v1/template_def.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import magnum.conf -from magnum.drivers.heat import k8s_fedora_template_def as kftd - -CONF = magnum.conf.CONF - - -class AtomicK8sTemplateDefinition(kftd.K8sFedoraTemplateDefinition): - """Kubernetes template for a Fedora Atomic VM.""" - - @property - def driver_module_path(self): - return __name__[:__name__.rindex('.')] - - @property - def template_path(self): - return os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'templates/kubecluster.yaml') diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/COPYING b/magnum/drivers/k8s_fedora_atomic_v1/templates/COPYING deleted file mode 100644 index d6456956..00000000 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
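Before the Heat templates themselves, it is worth noting how the driver code removed above ties everything together: Driver.provides advertises the (server_type, os, coe) combination this driver can build, and AtomicK8sTemplateDefinition.template_path points at the kubecluster.yaml file whose deletion follows. A minimal sketch of that wiring — the choose_driver helper and the hard-coded driver list are illustrative assumptions, a stand-in for Magnum's actual driver discovery:

    # Hypothetical illustration only: a stand-in for Magnum's real
    # driver discovery. It shows how the 'provides' triples published
    # by the Driver class deleted above describe what it can build.
    from magnum.drivers.k8s_fedora_atomic_v1.driver import Driver

    def choose_driver(drivers, server_type, os_name, coe):
        # Pick the first driver whose 'provides' list advertises the
        # requested (server_type, os, coe) combination.
        wanted = {'server_type': server_type, 'os': os_name, 'coe': coe}
        for drv in drivers:
            if wanted in drv.provides:
                return drv
        raise LookupError('no driver provides %s' % wanted)

    drv = choose_driver([Driver()], 'vm', 'fedora-atomic', 'kubernetes')
    definition = drv.get_template_definition()  # AtomicK8sTemplateDefinition
    print(definition.template_path)             # .../templates/kubecluster.yaml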
diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml deleted file mode 100644 index 2e988b28..00000000 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml +++ /dev/null @@ -1,623 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This template will boot a Kubernetes cluster with one or more - minions (as specified by the number_of_minions parameter, which - defaults to 1). - -parameters: - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - default: public - - fixed_network: - type: string - description: uuid/name of an existing network to use to provision machines - default: "" - - fixed_subnet: - type: string - description: uuid/name of an existing subnet to use to provision machines - default: "" - - server_image: - type: string - description: glance image used to boot the server - - master_flavor: - type: string - default: m1.small - description: flavor to use when booting the server for master nodes - - minion_flavor: - type: string - default: m1.small - description: flavor to use when booting the server for minions - - prometheus_monitoring: - type: boolean - default: false - description: > - whether or not to have the grafana-prometheus-cadvisor monitoring setup - - grafana_admin_passwd: - type: string - default: admin - hidden: true - description: > - admin user password for the Grafana monitoring interface - - dns_nameserver: - type: string - description: address of a DNS nameserver reachable in your environment - default: 8.8.8.8 - - number_of_masters: - type: number - description: how many kubernetes masters to spawn - default: 1 - - number_of_minions: - type: number - description: how many kubernetes minions to spawn - default: 1 - - fixed_network_cidr: - type: string - description: network range for fixed ip network - default: 10.0.0.0/24 - - portal_network_cidr: - type: string - description: > - address range used by kubernetes for service portals - default: 10.254.0.0/16 - - network_driver: - type: string - description: network driver to use for instantiating container networks - default: flannel - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - default: 10.100.0.0/16 - - flannel_network_subnetlen: - type: number - description: size of subnet assigned to each minion - default: 24 - - flannel_backend: - type: string - description: > - specify the backend for flannel, default udp backend - default: "udp" - constraints: - - allowed_values: ["udp", "vxlan", "host-gw"] - - system_pods_initial_delay: - type: number - description: > - health check, time to wait for system pods (podmaster, scheduler) to boot - (in seconds) - default: 30 - - system_pods_timeout: - type: number - description: > - health check, timeout for system pods (podmaster, scheduler) to answer. - (in seconds) - default: 5 - - admission_control_list: - type: string - description: > - List of admission control plugins to activate - default: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers. 
- default: "true" - constraints: - - allowed_values: ["true", "false"] - - etcd_volume_size: - type: number - description: > - size of the cinder volume for etcd storage - default: 0 - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - default: 0 - - docker_volume_type: - type: string - description: > - type of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - default: "devicemapper" - constraints: - - allowed_values: ["devicemapper", "overlay"] - - wait_condition_timeout: - type: number - description: > - timeout for the Wait Conditions - default: 6000 - - minions_to_remove: - type: comma_delimited_list - description: > - List of minions to be removed when doing an update. Individual minion may - be referenced several ways: (1) The resource name (e.g. ['1', '3']), - (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should - be empty when doing an create. - default: [] - - discovery_url: - type: string - description: > - Discovery URL used for bootstrapping the etcd cluster. - - registry_enabled: - type: boolean - description: > - Indicates whether the docker registry is enabled. - default: false - - registry_port: - type: number - description: port of registry service - default: 5000 - - swift_region: - type: string - description: region of swift service - default: "" - - registry_container: - type: string - description: > - name of swift container which docker registry stores images in - default: "container" - - registry_insecure: - type: boolean - description: > - indicates whether to skip TLS verification between registry and backend storage - default: true - - registry_chunksize: - type: number - description: > - size fo the data segments for the swift dynamic large objects - default: 5242880 - - volume_driver: - type: string - description: volume driver to use for container storage - default: "" - - region_name: - type: string - description: A logically separate section of the cluster - - username: - type: string - description: > - user account - - password: - type: string - description: > - user password, not set in current implementation, only used to - fill in for Kubernetes config file - default: - ChangeMe - hidden: true - - tenant_name: - type: string - description: > - tenant name - - loadbalancing_protocol: - type: string - description: > - The protocol which is used for load balancing. If you want to change - tls_disabled option to 'True', please change this to "HTTP". - default: TCP - constraints: - - allowed_values: ["TCP", "HTTP"] - - tls_disabled: - type: boolean - description: whether or not to disable TLS - default: False - - kube_dashboard_enabled: - type: boolean - description: whether or not to enable kubernetes dashboard - default: True - - kubernetes_port: - type: number - description: > - The port which are used by kube-apiserver to provide Kubernetes - service. 
- default: 6443 - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - http_proxy: - type: string - description: http proxy address for docker - default: "" - - https_proxy: - type: string - description: https proxy address for docker - default: "" - - no_proxy: - type: string - description: no proxies for docker - default: "" - - trustee_domain_id: - type: string - description: domain id of the trustee - - trustee_user_id: - type: string - description: user id of the trustee - - trustee_username: - type: string - description: username of the trustee - - trustee_password: - type: string - description: password of the trustee - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - hidden: true - - auth_url: - type: string - description: url for keystone - - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - default: v1.5.3 - - kube_dashboard_version: - type: string - description: version of kubernetes dashboard used for kubernetes cluster - default: v1.5.1 - - insecure_registry_url: - type: string - description: insecure registry url - default: "" - - dns_service_ip: - type: string - description: > - address used by Kubernetes DNS service - default: 10.254.0.10 - - dns_cluster_domain: - type: string - description: > - domain name for cluster DNS - default: "cluster.local" - -resources: - - ###################################################################### - # - # network resources. allocate a network and router for our server. - # Important: the Load Balancer feature in Kubernetes requires that - # the name for the fixed_network must be "private" for the - # address lookup in Kubernetes to work properly - # - - network: - type: ../../common/templates/network.yaml - properties: - existing_network: {get_param: fixed_network} - existing_subnet: {get_param: fixed_subnet} - private_network_cidr: {get_param: fixed_network_cidr} - dns_nameserver: {get_param: dns_nameserver} - external_network: {get_param: external_network} - private_network_name: private - - api_lb: - type: ../../common/templates/lb.yaml - properties: - fixed_subnet: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - protocol: {get_param: loadbalancing_protocol} - port: {get_param: kubernetes_port} - - etcd_lb: - type: ../../common/templates/lb.yaml - properties: - fixed_subnet: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - protocol: {get_param: loadbalancing_protocol} - port: 2379 - - ###################################################################### - # - # security groups. we need to permit network traffic of various - # sorts. 
- # - - secgroup_kube_master: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - protocol: tcp - port_range_min: 7080 - port_range_max: 7080 - - protocol: tcp - port_range_min: 8080 - port_range_max: 8080 - - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - - protocol: tcp - port_range_min: 6443 - port_range_max: 6443 - - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - - secgroup_kube_minion: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - - protocol: udp - - ###################################################################### - # - # resources that expose the IPs of either the kube master or a given - # LBaaS pool depending on whether LBaaS is enabled for the cluster. - # - - api_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_attr: [api_lb, floating_address]} - pool_private_ip: {get_attr: [api_lb, address]} - master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} - master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} - - etcd_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_private_ip: {get_attr: [etcd_lb, address]} - master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} - - ###################################################################### - # - # resources that expose the IPs of either floating ip or a given - # fixed ip depending on whether FloatingIP is enabled for the cluster. - # - - api_address_floating_switch: - type: Magnum::FloatingIPAddressSwitcher - properties: - public_ip: {get_attr: [api_address_lb_switch, public_ip]} - private_ip: {get_attr: [api_address_lb_switch, private_ip]} - - ###################################################################### - # - # kubernetes masters. This is a resource group that will create - # masters. 
- # - - kube_masters: - type: OS::Heat::ResourceGroup - depends_on: - - network - properties: - count: {get_param: number_of_masters} - resource_def: - type: kubemaster.yaml - properties: - prometheus_monitoring: {get_param: prometheus_monitoring} - grafana_admin_passwd: {get_param: grafana_admin_passwd} - api_public_address: {get_attr: [api_lb, floating_address]} - api_private_address: {get_attr: [api_lb, address]} - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - master_flavor: {get_param: master_flavor} - external_network: {get_param: external_network} - kube_allow_priv: {get_param: kube_allow_priv} - etcd_volume_size: {get_param: etcd_volume_size} - docker_volume_size: {get_param: docker_volume_size} - docker_volume_type: {get_param: docker_volume_type} - docker_storage_driver: {get_param: docker_storage_driver} - wait_condition_timeout: {get_param: wait_condition_timeout} - network_driver: {get_param: network_driver} - flannel_network_cidr: {get_param: flannel_network_cidr} - flannel_network_subnetlen: {get_param: flannel_network_subnetlen} - flannel_backend: {get_param: flannel_backend} - system_pods_initial_delay: {get_param: system_pods_initial_delay} - system_pods_timeout: {get_param: system_pods_timeout} - portal_network_cidr: {get_param: portal_network_cidr} - admission_control_list: {get_param: admission_control_list} - discovery_url: {get_param: discovery_url} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} - volume_driver: {get_param: volume_driver} - fixed_network: {get_attr: [network, fixed_network]} - fixed_subnet: {get_attr: [network, fixed_subnet]} - api_pool_id: {get_attr: [api_lb, pool_id]} - etcd_pool_id: {get_attr: [etcd_lb, pool_id]} - username: {get_param: username} - password: {get_param: password} - tenant_name: {get_param: tenant_name} - kubernetes_port: {get_param: kubernetes_port} - tls_disabled: {get_param: tls_disabled} - kube_dashboard_enabled: {get_param: kube_dashboard_enabled} - secgroup_kube_master_id: {get_resource: secgroup_kube_master} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - kube_version: {get_param: kube_version} - kube_dashboard_version: {get_param: kube_dashboard_version} - trustee_user_id: {get_param: trustee_user_id} - trustee_password: {get_param: trustee_password} - trust_id: {get_param: trust_id} - auth_url: {get_param: auth_url} - insecure_registry_url: {get_param: insecure_registry_url} - etcd_lb_vip: {get_attr: [etcd_lb, address]} - dns_service_ip: {get_param: dns_service_ip} - dns_cluster_domain: {get_param: dns_cluster_domain} - - ###################################################################### - # - # kubernetes minions. This is an resource group that will initially - # create minions, and needs to be manually scaled. 
- # - - kube_minions: - type: OS::Heat::ResourceGroup - depends_on: - - network - properties: - count: {get_param: number_of_minions} - removal_policies: [{resource_list: {get_param: minions_to_remove}}] - resource_def: - type: kubeminion.yaml - properties: - prometheus_monitoring: {get_param: prometheus_monitoring} - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - minion_flavor: {get_param: minion_flavor} - fixed_network: {get_attr: [network, fixed_network]} - fixed_subnet: {get_attr: [network, fixed_subnet]} - network_driver: {get_param: network_driver} - flannel_network_cidr: {get_param: flannel_network_cidr} - kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]} - etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} - external_network: {get_param: external_network} - kube_allow_priv: {get_param: kube_allow_priv} - docker_volume_size: {get_param: docker_volume_size} - docker_volume_type: {get_param: docker_volume_type} - docker_storage_driver: {get_param: docker_storage_driver} - wait_condition_timeout: {get_param: wait_condition_timeout} - registry_enabled: {get_param: registry_enabled} - registry_port: {get_param: registry_port} - swift_region: {get_param: swift_region} - registry_container: {get_param: registry_container} - registry_insecure: {get_param: registry_insecure} - registry_chunksize: {get_param: registry_chunksize} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} - volume_driver: {get_param: volume_driver} - region_name: {get_param: region_name} - tenant_name: {get_param: tenant_name} - auth_url: {get_param: auth_url} - username: {get_param: username} - password: {get_param: password} - kubernetes_port: {get_param: kubernetes_port} - tls_disabled: {get_param: tls_disabled} - secgroup_kube_minion_id: {get_resource: secgroup_kube_minion} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - kube_version: {get_param: kube_version} - trustee_user_id: {get_param: trustee_user_id} - trustee_username: {get_param: trustee_username} - trustee_password: {get_param: trustee_password} - trustee_domain_id: {get_param: trustee_domain_id} - trust_id: {get_param: trust_id} - insecure_registry_url: {get_param: insecure_registry_url} - dns_service_ip: {get_param: dns_service_ip} - dns_cluster_domain: {get_param: dns_cluster_domain} - -outputs: - - api_address: - value: - str_replace: - template: api_ip_address - params: - api_ip_address: {get_attr: [api_address_floating_switch, ip_address]} - description: > - This is the API endpoint of the Kubernetes cluster. Use this to access - the Kubernetes API. - - registry_address: - value: - str_replace: - template: localhost:port - params: - port: {get_param: registry_port} - description: - This is the URL of the docker registry server where you can store docker - images. - - kube_masters_private: - value: {get_attr: [kube_masters, kube_master_ip]} - description: > - This is a list of the "private" IP addresses of all the Kubernetes masters. - - kube_masters: - value: {get_attr: [kube_masters, kube_master_external_ip]} - description: > - This is a list of the "public" IP addresses of all the Kubernetes masters. - Use these IP addresses to log in to the Kubernetes masters via ssh. - - kube_minions_private: - value: {get_attr: [kube_minions, kube_minion_ip]} - description: > - This is a list of the "private" IP addresses of all the Kubernetes minions. - - kube_minions: - value: {get_attr: [kube_minions, kube_minion_external_ip]} - description: > - This is a list of the "public" IP addresses of all the Kubernetes minions. - Use these IP addresses to log in to the Kubernetes minions via ssh.
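For orientation, here is how the deleted kubecluster.yaml was consumed: Magnum's heat driver rendered it and handed the collected parameters to Heat. A sketch using python-heatclient, assuming a Keystone endpoint and a deliberately trimmed parameter set (the real flow also supplies docker_volume_type, the trustee credentials, cluster_uuid, magnum_url, and the other parameters that have no defaults):

    # Sketch: launch the (deleted) kubecluster.yaml directly with
    # python-heatclient. Endpoint, credentials and the parameter set
    # below are assumptions for illustration.
    from keystoneauth1.identity import v3
    from keystoneauth1 import session
    from heatclient import client as heat_client

    auth = v3.Password(auth_url='http://keystone:5000/v3',  # assumed endpoint
                       username='demo', password='secret',
                       project_name='demo',
                       user_domain_id='default', project_domain_id='default')
    heat = heat_client.Client('1', session=session.Session(auth=auth))

    with open('kubecluster.yaml') as f:
        template = f.read()

    # Parameters without defaults are trimmed here; Magnum fills them in.
    heat.stacks.create(
        stack_name='k8s-demo',
        template=template,
        parameters={
            'ssh_key_name': 'default',
            'server_image': 'fedora-atomic-latest',  # assumed image name
            'number_of_masters': 1,
            'number_of_minions': 2,
            'discovery_url': 'https://discovery.etcd.io/<token>',
        })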
diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml deleted file mode 100644 index 32a911a2..00000000 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml +++ /dev/null @@ -1,602 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a single Kubernetes master. This stack - is included by a ResourceGroup resource in the parent template - (kubecluster.yaml). - -parameters: - - server_image: - type: string - description: glance image used to boot the server - - master_flavor: - type: string - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - portal_network_cidr: - type: string - description: > - address range used by kubernetes for service portals - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers. - constraints: - - allowed_values: ["true", "false"] - - etcd_volume_size: - type: number - description: > - size of a cinder volume to allocate for etcd storage - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - - docker_volume_type: - type: string - description: > - type of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - default: "devicemapper" - constraints: - - allowed_values: ["devicemapper", "overlay"] - - volume_driver: - type: string - description: volume driver to use for container storage - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - - flannel_network_subnetlen: - type: number - description: size of subnet assigned to each master - - flannel_backend: - type: string - description: > - specify the backend for flannel, default udp backend - constraints: - - allowed_values: ["udp", "vxlan", "host-gw"] - - system_pods_initial_delay: - type: number - description: > - health check, time to wait for system pods (podmaster, scheduler) to boot - (in seconds) - default: 30 - - system_pods_timeout: - type: number - description: > - health check, timeout for system pods (podmaster, scheduler) to answer. - (in seconds) - default: 5 - - admission_control_list: - type: string - description: > - List of admission control plugins to activate - - discovery_url: - type: string - description: > - Discovery URL used for bootstrapping the etcd cluster. - - tls_disabled: - type: boolean - description: whether or not to disable TLS - - kube_dashboard_enabled: - type: boolean - description: whether or not to enable the kubernetes dashboard - - kubernetes_port: - type: number - description: > - The port which is used by kube-apiserver to provide the Kubernetes - service.
- - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - prometheus_monitoring: - type: boolean - description: > - whether or not to have prometheus and grafana deployed - - grafana_admin_passwd: - type: string - hidden: true - description: > - admin user password for the Grafana monitoring interface - - api_public_address: - type: string - description: Public IP address of the Kubernetes master server. - default: "" - - api_private_address: - type: string - description: Private IP address of the Kubernetes master server. - default: "" - - fixed_network: - type: string - description: Network from which to allocate fixed addresses. - - fixed_subnet: - type: string - description: Subnet from which to allocate fixed addresses. - - network_driver: - type: string - description: network driver to use for instantiating container networks - - wait_condition_timeout: - type: number - description : > - timeout for the Wait Conditions - - secgroup_kube_master_id: - type: string - description: ID of the security group for kubernetes master. - - api_pool_id: - type: string - description: ID of the load balancer pool of k8s API server. - - etcd_pool_id: - type: string - description: ID of the load balancer pool of etcd server. - - auth_url: - type: string - description: > - url for kubernetes to authenticate - - username: - type: string - description: > - user account - - password: - type: string - description: > - user password - - tenant_name: - type: string - description: > - tenant name - - http_proxy: - type: string - description: http proxy address for docker - - https_proxy: - type: string - description: https proxy address for docker - - no_proxy: - type: string - description: no proxies for docker - - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - - kube_dashboard_version: - type: string - description: version of kubernetes dashboard used for kubernetes cluster - - trustee_user_id: - type: string - description: user id of the trustee - - trustee_password: - type: string - description: password of the trustee - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - hidden: true - - insecure_registry_url: - type: string - description: insecure registry url - - etcd_lb_vip: - type: string - description: > - etcd lb vip private used to generate certs on master. - default: "" - - dns_service_ip: - type: string - description: > - address used by Kubernetes DNS service - - dns_cluster_domain: - type: string - description: > - domain name for cluster DNS - -resources: - - master_wait_handle: - type: OS::Heat::WaitConditionHandle - - master_wait_condition: - type: OS::Heat::WaitCondition - depends_on: kube-master - properties: - handle: {get_resource: master_wait_handle} - timeout: {get_param: wait_condition_timeout} - - ###################################################################### - # - # resource that exposes the IPs of either the kube master or the API - # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
- # - - api_address_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_param: api_public_address} - pool_private_ip: {get_param: api_private_address} - master_public_ip: {get_attr: [kube_master_floating, floating_ip_address]} - master_private_ip: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive. - # - - write_heat_params: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.yaml} - params: - "$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring} - "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]} - "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]} - "$KUBE_API_PORT": {get_param: kubernetes_port} - "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} - "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} - "$ETCD_VOLUME": {get_resource: etcd_volume} - "$ETCD_VOLUME_SIZE": {get_param: etcd_volume_size} - "$DOCKER_VOLUME": {get_resource: docker_volume} - "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} - "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} - "$NETWORK_DRIVER": {get_param: network_driver} - "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} - "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} - "$FLANNEL_BACKEND": {get_param: flannel_backend} - "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} - "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} - "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} - "$ADMISSION_CONTROL_LIST": {get_param: admission_control_list} - "$ETCD_DISCOVERY_URL": {get_param: discovery_url} - "$AUTH_URL": {get_param: auth_url} - "$USERNAME": {get_param: username} - "$PASSWORD": {get_param: password} - "$TENANT_NAME": {get_param: tenant_name} - "$CLUSTER_SUBNET": {get_param: fixed_subnet} - "$TLS_DISABLED": {get_param: tls_disabled} - "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled} - "$CLUSTER_UUID": {get_param: cluster_uuid} - "$MAGNUM_URL": {get_param: magnum_url} - "$VOLUME_DRIVER": {get_param: volume_driver} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$KUBE_VERSION": {get_param: kube_version} - "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version} - "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} - "$TRUSTEE_USER_ID": {get_param: trustee_user_id} - "$TRUSTEE_PASSWORD": {get_param: trustee_password} - "$TRUST_ID": {get_param: trust_id} - "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} - "$ETCD_LB_VIP": {get_param: etcd_lb_vip} - "$DNS_SERVICE_IP": {get_param: dns_service_ip} - "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain} - - make_cert: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/make-cert.sh} - - configure_docker_storage: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - params: - $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} - template: {get_file: 
../../common/templates/fragments/configure-docker-storage.sh} - - configure_etcd: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh} - - write_kube_os_config: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh} - - configure_kubernetes: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-master.sh} - - write_network_config: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-network-config.sh} - - write_prometheus_configmap: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-prometheus-configmap.yaml} - - - write_prometheus_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-prometheus-service.yaml} - - write_grafana_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/kubernetes/fragments/write-grafana-service.yaml} - params: - "$ADMIN_PASSWD": {get_param: grafana_admin_passwd} - - enable_monitoring: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/kubernetes/fragments/enable-monitoring.sh} - params: - "$ADMIN_PASSWD": {get_param: grafana_admin_passwd} - - network_config_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/network-config-service.sh} - - enable_services: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh} - - network_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/network-service.sh} - - enable_kube_controller_manager_scheduler: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/enable-kube-controller-manager-scheduler.sh} - - kube_system_namespace_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/kube-system-namespace-service.sh} - - kube_ui_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh} - - enable_kube_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/enable-kube-proxy-master.sh} - - core_dns_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh} - - master_wc_notify: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/wc-notify-master.sh} - - disable_selinux: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: 
../../common/templates/kubernetes/fragments/disable-selinux.sh} - - add_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh} - - kube_master_init: - type: OS::Heat::MultipartMime - properties: - parts: - - config: {get_resource: disable_selinux} - - config: {get_resource: write_heat_params} - - config: {get_resource: configure_etcd} - - config: {get_resource: write_kube_os_config} - - config: {get_resource: make_cert} - - config: {get_resource: configure_docker_storage} - - config: {get_resource: configure_kubernetes} - - config: {get_resource: add_proxy} - - config: {get_resource: enable_services} - - config: {get_resource: write_network_config} - - config: {get_resource: write_prometheus_configmap} - - config: {get_resource: write_prometheus_service} - - config: {get_resource: write_grafana_service} - - config: {get_resource: network_config_service} - - config: {get_resource: network_service} - - config: {get_resource: kube_system_namespace_service} - - config: {get_resource: core_dns_service} - - config: {get_resource: enable_kube_controller_manager_scheduler} - - config: {get_resource: enable_kube_proxy} - - config: {get_resource: kube_ui_service} - - config: {get_resource: enable_monitoring} - - config: {get_resource: master_wc_notify} - - ###################################################################### - # - # a single kubernetes master. - # - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - kube-master: - type: OS::Nova::Server - properties: - image: {get_param: server_image} - flavor: {get_param: master_flavor} - key_name: {get_param: ssh_key_name} - user_data_format: RAW - user_data: {get_resource: kube_master_init} - networks: - - port: {get_resource: kube_master_eth0} - - kube_master_eth0: - type: OS::Neutron::Port - properties: - network: {get_param: fixed_network} - security_groups: - - {get_param: secgroup_kube_master_id} - fixed_ips: - - subnet: {get_param: fixed_subnet} - allowed_address_pairs: - - ip_address: {get_param: flannel_network_cidr} - replacement_policy: AUTO - - kube_master_floating: - type: Magnum::Optional::KubeMaster::Neutron::FloatingIP - properties: - floating_network: {get_param: external_network} - port_id: {get_resource: kube_master_eth0} - - api_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: api_pool_id} - address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - subnet: { get_param: fixed_subnet } - protocol_port: {get_param: kubernetes_port} - - etcd_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: etcd_pool_id} - address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - subnet: { get_param: fixed_subnet } - protocol_port: 2379 - - ###################################################################### - # - # etcd storage. This allocates a cinder volume and attaches it - # to the master. - # - - etcd_volume: - type: Magnum::Optional::Etcd::Volume - properties: - size: {get_param: etcd_volume_size} - - etcd_volume_attach: - type: Magnum::Optional::Etcd::VolumeAttachment - properties: - instance_uuid: {get_resource: kube-master} - volume_id: {get_resource: etcd_volume} - mountpoint: /dev/vdc - - ###################################################################### - # - # docker storage. 
This allocates a cinder volume and attaches it - to the master. - # - - docker_volume: - type: Magnum::Optional::Cinder::Volume - properties: - size: {get_param: docker_volume_size} - volume_type: {get_param: docker_volume_type} - - docker_volume_attach: - type: Magnum::Optional::Cinder::VolumeAttachment - properties: - instance_uuid: {get_resource: kube-master} - volume_id: {get_resource: docker_volume} - mountpoint: /dev/vdb - -outputs: - - kube_master_ip: - value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - description: > - This is the "private" IP address of the Kubernetes master node. - - kube_master_external_ip: - value: {get_attr: [kube_master_floating, floating_ip_address]} - description: > - This is the "public" IP address of the Kubernetes master node.
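The kube_master_init resource in the hunk above combines every SoftwareConfig fragment into one multipart MIME user-data archive for cloud-init. A rough Python analogue of that assembly, with placeholder fragment names and contents — this illustrates the mechanism, not Heat's implementation:

    # Rough, assumed analogue of what OS::Heat::MultipartMime does with
    # the fragment list in kube_master_init above.
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    def build_userdata(fragments):
        # Combine shell/cloud-config fragments into one user-data blob;
        # cloud-init dispatches each part by its MIME subtype.
        outer = MIMEMultipart()
        for name, body in fragments:
            subtype = 'x-shellscript' if body.startswith('#!') else 'cloud-config'
            part = MIMEText(body, subtype)
            part.add_header('Content-Disposition', 'attachment', filename=name)
            outer.attach(part)
        return outer.as_string()

    # Placeholder fragment contents, for illustration only.
    userdata = build_userdata([
        ('disable-selinux.sh', '#!/bin/sh\nsetenforce 0\n'),
        ('write-heat-params.yaml', '#cloud-config\nwrite_files: []\n'),
    ])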
diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml deleted file mode 100644 index 6093b4da..00000000 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml +++ /dev/null @@ -1,467 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a single Kubernetes minion. This stack - is included by a ResourceGroup resource in the parent template - (kubecluster.yaml). - -parameters: - - server_image: - type: string - description: glance image used to boot the server - - minion_flavor: - type: string - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers. - constraints: - - allowed_values: ["true", "false"] - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - - docker_volume_type: - type: string - description: > - type of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - default: "devicemapper" - constraints: - - allowed_values: ["devicemapper", "overlay"] - - tls_disabled: - type: boolean - description: whether or not to disable TLS - - kubernetes_port: - type: number - description: > - The port which is used by kube-apiserver to provide the Kubernetes - service. - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - prometheus_monitoring: - type: boolean - description: > - whether or not to have the node-exporter running on the node - - kube_master_ip: - type: string - description: IP address of the Kubernetes master server. - - etcd_server_ip: - type: string - description: IP address of the Etcd server. - - fixed_network: - type: string - description: Network from which to allocate fixed addresses. - - fixed_subnet: - type: string - description: Subnet from which to allocate fixed addresses. - - network_driver: - type: string - description: network driver to use for instantiating container networks - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - - wait_condition_timeout: - type: number - description: > - timeout for the Wait Conditions - - registry_enabled: - type: boolean - description: > - Indicates whether the docker registry is enabled. - - registry_port: - type: number - description: port of registry service - - swift_region: - type: string - description: region of swift service - - registry_container: - type: string - description: > - name of swift container which docker registry stores images in - - registry_insecure: - type: boolean - description: > - indicates whether to skip TLS verification between registry and backend storage - - registry_chunksize: - type: number - description: > - size of the data segments for the swift dynamic large objects - - secgroup_kube_minion_id: - type: string - description: ID of the security group for kubernetes minion. - - volume_driver: - type: string - description: volume driver to use for container storage - - region_name: - type: string - description: A logically separate section of the cluster - - tenant_name: - type: string - description: an alternative term for a project - - username: - type: string - description: > - user account - - password: - type: string - description: > - user password, not set in current implementation, only used to - fill in for Kubernetes config file - hidden: true - - http_proxy: - type: string - description: http proxy address for docker - - https_proxy: - type: string - description: https proxy address for docker - - no_proxy: - type: string - description: no proxies for docker - - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - - trustee_domain_id: - type: string - description: domain id of the trustee - - trustee_user_id: - type: string - description: user id of the trustee - - trustee_username: - type: string - description: username of the trustee - - trustee_password: - type: string - description: password of the trustee - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - hidden: true - - auth_url: - type: string - description: > - url for keystone, must be v2 since the k8s backend only supports v2 - at this point - - insecure_registry_url: - type: string - description: insecure registry url - - dns_service_ip: - type: string - description: > - address used by Kubernetes DNS service - - dns_cluster_domain: - type: string - description: > - domain name for cluster DNS - -resources: - - minion_wait_handle: - type: OS::Heat::WaitConditionHandle - - minion_wait_condition: - type: OS::Heat::WaitCondition - depends_on: kube-minion - properties: - handle: {get_resource: minion_wait_handle} - timeout: {get_param: wait_condition_timeout} - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive.
- # - - write_heat_params: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params.yaml} - params: - $PROMETHEUS_MONITORING: {get_param: prometheus_monitoring} - $KUBE_ALLOW_PRIV: {get_param: kube_allow_priv} - $KUBE_MASTER_IP: {get_param: kube_master_ip} - $KUBE_API_PORT: {get_param: kubernetes_port} - $KUBE_NODE_PUBLIC_IP: {get_attr: [kube_minion_floating, floating_ip_address]} - $KUBE_NODE_IP: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} - $ETCD_SERVER_IP: {get_param: etcd_server_ip} - $DOCKER_VOLUME: {get_resource: docker_volume} - $DOCKER_VOLUME_SIZE: {get_param: docker_volume_size} - $DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver} - $NETWORK_DRIVER: {get_param: network_driver} - $REGISTRY_ENABLED: {get_param: registry_enabled} - $REGISTRY_PORT: {get_param: registry_port} - $SWIFT_REGION: {get_param: swift_region} - $REGISTRY_CONTAINER: {get_param: registry_container} - $REGISTRY_INSECURE: {get_param: registry_insecure} - $REGISTRY_CHUNKSIZE: {get_param: registry_chunksize} - $TLS_DISABLED: {get_param: tls_disabled} - $CLUSTER_UUID: {get_param: cluster_uuid} - $MAGNUM_URL: {get_param: magnum_url} - $USERNAME: {get_param: username} - $PASSWORD: {get_param: password} - $VOLUME_DRIVER: {get_param: volume_driver} - $REGION_NAME: {get_param: region_name} - $TENANT_NAME: {get_param: tenant_name} - $HTTP_PROXY: {get_param: http_proxy} - $HTTPS_PROXY: {get_param: https_proxy} - $NO_PROXY: {get_param: no_proxy} - $KUBE_VERSION: {get_param: kube_version} - $WAIT_CURL: {get_attr: [minion_wait_handle, curl_cli]} - $TRUSTEE_USER_ID: {get_param: trustee_user_id} - $TRUSTEE_PASSWORD: {get_param: trustee_password} - $TRUST_ID: {get_param: trust_id} - $AUTH_URL: {get_param: auth_url} - $INSECURE_REGISTRY_URL: {get_param: insecure_registry_url} - $DNS_SERVICE_IP: {get_param: dns_service_ip} - $DNS_CLUSTER_DOMAIN: {get_param: dns_cluster_domain} - - write_kubeconfig: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-kubeconfig.yaml} - - write_kube_os_config: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh} - - make_cert: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh} - - configure_docker_storage: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - params: - $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} - template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} - - configure_docker_registry: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/fragments/configure-docker-registry.sh} - - configure_kubernetes_minion: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-minion.sh} - - network_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/network-service.sh} - - enable_services: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: 
../../common/templates/kubernetes/fragments/enable-services-minion.sh} - - enable_docker_registry: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/fragments/enable-docker-registry.sh} - - enable_kube_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/enable-kube-proxy-minion.sh} - - enable_node_exporter: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/enable-node-exporter.sh} - - minion_wc_notify: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: | - #!/bin/bash -v - wc_notify --data-binary '{"status": "SUCCESS"}' - params: - wc_notify: {get_attr: [minion_wait_handle, curl_cli]} - - disable_selinux: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/disable-selinux.sh} - - add_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh} - - kube_minion_init: - type: OS::Heat::MultipartMime - properties: - parts: - - config: {get_resource: disable_selinux} - - config: {get_resource: write_heat_params} - - config: {get_resource: write_kubeconfig} - - config: {get_resource: write_kube_os_config} - - config: {get_resource: make_cert} - - config: {get_resource: configure_docker_storage} - - config: {get_resource: configure_docker_registry} - - config: {get_resource: configure_kubernetes_minion} - - config: {get_resource: network_service} - - config: {get_resource: add_proxy} - - config: {get_resource: enable_services} - - config: {get_resource: enable_kube_proxy} - - config: {get_resource: enable_node_exporter} - - config: {get_resource: enable_docker_registry} - - config: {get_resource: minion_wc_notify} - - ###################################################################### - # - # a single kubernetes minion. - # - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - kube-minion: - type: OS::Nova::Server - properties: - image: {get_param: server_image} - flavor: {get_param: minion_flavor} - key_name: {get_param: ssh_key_name} - user_data_format: RAW - user_data: {get_resource: kube_minion_init} - networks: - - port: {get_resource: kube_minion_eth0} - - kube_minion_eth0: - type: OS::Neutron::Port - properties: - network: {get_param: fixed_network} - security_groups: - - get_param: secgroup_kube_minion_id - fixed_ips: - - subnet: {get_param: fixed_subnet} - allowed_address_pairs: - - ip_address: {get_param: flannel_network_cidr} - replacement_policy: AUTO - - kube_minion_floating: - type: Magnum::Optional::KubeMinion::Neutron::FloatingIP - properties: - floating_network: {get_param: external_network} - port_id: {get_resource: kube_minion_eth0} - - ###################################################################### - # - # docker storage. This allocates a cinder volume and attaches it - # to the minion. 
- # - - docker_volume: - type: Magnum::Optional::Cinder::Volume - properties: - size: {get_param: docker_volume_size} - volume_type: {get_param: docker_volume_type} - - docker_volume_attach: - type: Magnum::Optional::Cinder::VolumeAttachment - properties: - instance_uuid: {get_resource: kube-minion} - volume_id: {get_resource: docker_volume} - mountpoint: /dev/vdb - -outputs: - - kube_minion_ip: - value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} - description: > - This is the "private" IP address of the Kubernetes minion node. - - kube_minion_external_ip: - value: {get_attr: [kube_minion_floating, floating_ip_address]} - description: > - This is the "public" IP address of the Kubernetes minion node. - - OS::stack_id: - value: {get_param: "OS::stack_id"} - description: > - This is the ID of the stack which is created from this template. diff --git a/magnum/drivers/k8s_fedora_atomic_v1/tools/grafana-prometheus-dashboard.json b/magnum/drivers/k8s_fedora_atomic_v1/tools/grafana-prometheus-dashboard.json deleted file mode 100644 index cd697650..00000000 --- a/magnum/drivers/k8s_fedora_atomic_v1/tools/grafana-prometheus-dashboard.json +++ /dev/null @@ -1,2079 +0,0 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "Compliant with Prometheus 1.5.2", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__requires": [ - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "" - }, - { - "type": "panel", - "id": "singlestat", - "name": "Singlestat", - "version": "" - }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "3.1.1" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.3.0" - } - ], - "id": null, - "title": "Kubernetes cluster monitoring (via Prometheus)", - "description": "Monitors Kubernetes cluster using Prometheus. Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics. 
Uses cAdvisor metrics only.", - "tags": [ - "kubernetes" - ], - "style": "dark", - "timezone": "browser", - "editable": true, - "hideControls": false, - "sharedCrosshair": false, - "rows": [ - { - "collapse": false, - "editable": true, - "height": "200px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "fill": 1, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)", - "thresholdLine": false - }, - "height": "200px", - "id": 32, - "isNew": true, - "legend": { - "alignAsTable": false, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "sideWidth": 200, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum (rate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[1m]))", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "Received", - "metric": "network", - "refId": "A", - "step": 10 - }, - { - "expr": "- sum (rate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[1m]))", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "Sent", - "metric": "network", - "refId": "B", - "step": 10 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Network I/O pressure", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "title": "Network I/O pressure" - }, - { - "collapse": false, - "editable": true, - "height": "250px", - "panels": [ - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "format": "percent", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": true, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "height": "180px", - "id": 4, - "interval": null, - "isNew": true, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 4, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "targets": [ - { - "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100", - "interval": "10s", - "intervalFactor": 1, - "refId": "A", - "step": 10 - } - ], - 
"thresholds": "65, 90", - "title": "Cluster memory usage", - "transparent": false, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "format": "percent", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": true, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "height": "180px", - "id": 6, - "interval": null, - "isNew": true, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 4, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "targets": [ - { - "expr": "sum (rate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100", - "interval": "10s", - "intervalFactor": 1, - "refId": "A", - "step": 10 - } - ], - "thresholds": "65, 90", - "title": "Cluster CPU usage (1m avg)", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "format": "percent", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": true, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "height": "180px", - "id": 7, - "interval": null, - "isNew": true, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 4, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "targets": [ - { - "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (container_fs_limit_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "A", - "step": 10 - } - ], - "thresholds": "65, 90", - "title": "Cluster filesystem usage", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 
0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "format": "bytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "height": "1px", - "id": 9, - "interval": null, - "isNew": true, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "20%", - "prefix": "", - "prefixFontSize": "20%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 2, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "targets": [ - { - "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", - "interval": "10s", - "intervalFactor": 1, - "refId": "A", - "step": 10 - } - ], - "thresholds": "", - "title": "Used", - "type": "singlestat", - "valueFontSize": "50%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "format": "bytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "height": "1px", - "id": 10, - "interval": null, - "isNew": true, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 2, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "targets": [ - { - "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})", - "interval": "10s", - "intervalFactor": 1, - "refId": "A", - "step": 10 - } - ], - "thresholds": "", - "title": "Total", - "type": "singlestat", - "valueFontSize": "50%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "height": "1px", - "id": 11, - "interval": null, - "isNew": true, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": " cores", - "postfixFontSize": "30%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": 
"null", - "text": "N/A", - "to": "null" - } - ], - "span": 2, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "targets": [ - { - "expr": "sum (rate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m]))", - "interval": "10s", - "intervalFactor": 1, - "refId": "A", - "step": 10 - } - ], - "thresholds": "", - "title": "Used", - "type": "singlestat", - "valueFontSize": "50%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "height": "1px", - "id": 12, - "interval": null, - "isNew": true, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": " cores", - "postfixFontSize": "30%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 2, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "targets": [ - { - "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})", - "interval": "10s", - "intervalFactor": 1, - "refId": "A", - "step": 10 - } - ], - "thresholds": "", - "title": "Total", - "type": "singlestat", - "valueFontSize": "50%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "format": "bytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "height": "1px", - "id": 13, - "interval": null, - "isNew": true, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 2, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "targets": [ - { - "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", - "interval": "10s", - "intervalFactor": 1, - "refId": "A", - "step": 10 - } - ], - "thresholds": "", - "title": "Used", - "type": "singlestat", - "valueFontSize": "50%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - 
"colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "format": "bytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "height": "1px", - "id": 14, - "interval": null, - "isNew": true, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 2, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "targets": [ - { - "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", - "interval": "10s", - "intervalFactor": 1, - "refId": "A", - "step": 10 - } - ], - "thresholds": "", - "title": "Total", - "type": "singlestat", - "valueFontSize": "50%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - } - ], - "showTitle": false, - "title": "Total usage" - }, - { - "collapse": false, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 3, - "editable": true, - "error": false, - "fill": 0, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "height": "", - "id": 17, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (pod_name)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "{{ pod_name }}", - "metric": "container_cpu", - "refId": "A", - "step": 10 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Pods CPU usage (1m avg)", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "none", - "label": "cores", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "showTitle": false, - "title": "Pods CPU usage" - }, - { - "collapse": true, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 3, - "editable": true, - "error": false, - "fill": 0, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 
112, 112, 0.22)" - }, - "height": "", - "id": 23, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "sum (rate (container_cpu_usage_seconds_total{systemd_service_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (systemd_service_name)", - "hide": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "{{ systemd_service_name }}", - "metric": "container_cpu", - "refId": "A", - "step": 10 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "System services CPU usage (1m avg)", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "none", - "label": "cores", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "title": "System services CPU usage" - }, - { - "collapse": true, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 3, - "editable": true, - "error": false, - "fill": 0, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "height": "", - "id": 24, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sideWidth": null, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (container_name, pod_name)", - "hide": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "pod: {{ pod_name }} | {{ container_name }}", - "metric": "container_cpu", - "refId": "A", - "step": 10 - }, - { - "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (kubernetes_io_hostname, name, image)", - "hide": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", - "metric": "container_cpu", - "refId": "B", - "step": 10 - }, - { - "expr": "sum (rate (container_cpu_usage_seconds_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", - "metric": "container_cpu", - "refId": "C", - "step": 10 - } - ], - "timeFrom": null, - 
"timeShift": null, - "title": "Containers CPU usage (1m avg)", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "none", - "label": "cores", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "title": "Containers CPU usage" - }, - { - "collapse": true, - "editable": true, - "height": "500px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 3, - "editable": true, - "error": false, - "fill": 0, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 20, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "sum (rate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", - "hide": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "{{ id }}", - "metric": "container_cpu", - "refId": "A", - "step": 10 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "All processes CPU usage (1m avg)", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "none", - "label": "cores", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "showTitle": false, - "title": "All processes CPU usage" - }, - { - "collapse": false, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "fill": 0, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 25, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sideWidth": 200, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (pod_name)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "{{ pod_name }}", - "metric": "container_memory_usage:sort_desc", - "refId": "A", - "step": 10 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Pods memory usage", - 
"tooltip": { - "msResolution": false, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "title": "Pods memory usage" - }, - { - "collapse": true, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "fill": 0, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 26, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sideWidth": 200, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "sum (container_memory_working_set_bytes{systemd_service_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (systemd_service_name)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "{{ systemd_service_name }}", - "metric": "container_memory_usage:sort_desc", - "refId": "A", - "step": 10 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "System services memory usage", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "title": "System services memory usage" - }, - { - "collapse": true, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "fill": 0, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 27, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sideWidth": 200, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}) by (container_name, pod_name)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "pod: {{ pod_name }} | {{ container_name }}", - "metric": "container_memory_usage:sort_desc", - "refId": "A", - "step": 10 - }, - { - "expr": "sum 
(container_memory_working_set_bytes{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, name, image)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", - "metric": "container_memory_usage:sort_desc", - "refId": "B", - "step": 10 - }, - { - "expr": "sum (container_memory_working_set_bytes{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, rkt_container_name)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", - "metric": "container_memory_usage:sort_desc", - "refId": "C", - "step": 10 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Containers memory usage", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "title": "Containers memory usage" - }, - { - "collapse": true, - "editable": true, - "height": "500px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "fill": 0, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 28, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": 200, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) by (id)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "{{ id }}", - "metric": "container_memory_usage:sort_desc", - "refId": "A", - "step": 10 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "All processes memory usage", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "title": "All processes memory usage" - }, - { - "collapse": false, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "fill": 1, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 16, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sideWidth": 200, - 
"sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (pod_name)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "-> {{ pod_name }}", - "metric": "network", - "refId": "A", - "step": 10 - }, - { - "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (pod_name)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "<- {{ pod_name }}", - "metric": "network", - "refId": "B", - "step": 10 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Pods network I/O (1m avg)", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "title": "Pods network I/O" - }, - { - "collapse": true, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "fill": 1, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 30, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sideWidth": 200, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (container_name, pod_name)", - "hide": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "-> pod: {{ pod_name }} | {{ container_name }}", - "metric": "network", - "refId": "B", - "step": 10 - }, - { - "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (container_name, pod_name)", - "hide": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "<- pod: {{ pod_name }} | {{ container_name }}", - "metric": "network", - "refId": "D", - "step": 10 - }, - { - "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (kubernetes_io_hostname, name, image)", - "hide": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", - "metric": "network", - "refId": "A", - "step": 10 - }, - { - "expr": "- sum (rate 
(container_network_transmit_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (kubernetes_io_hostname, name, image)", - "hide": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "<- docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", - "metric": "network", - "refId": "C", - "step": 10 - }, - { - "expr": "sum (rate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", - "hide": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", - "metric": "network", - "refId": "E", - "step": 10 - }, - { - "expr": "- sum (rate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", - "hide": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", - "metric": "network", - "refId": "F", - "step": 10 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Containers network I/O (1m avg)", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "title": "Containers network I/O" - }, - { - "collapse": true, - "editable": true, - "height": "500px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "fill": 1, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 29, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": 200, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum (rate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "-> {{ id }}", - "metric": "network", - "refId": "A", - "step": 10 - }, - { - "expr": "- sum (rate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "<- {{ id }}", - "metric": "network", - "refId": "B", - "step": 10 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "All processes network I/O (1m avg)", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "show": true - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - 
"min": null, - "show": false - } - ] - } - ], - "title": "All processes network I/O" - } - ], - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "templating": { - "list": [ - { - "allValue": ".*", - "current": {}, - "datasource": "${DS_PROMETHEUS}", - "hide": 0, - "includeAll": true, - "multi": false, - "name": "Node", - "options": [], - "query": "label_values(kubernetes_io_hostname)", - "refresh": 1, - "type": "query" - } - ] - }, - "annotations": { - "list": [] - }, - "refresh": "10s", - "schemaVersion": 12, - "version": 13, - "links": [], - "gnetId": 1621 -} \ No newline at end of file diff --git a/magnum/drivers/k8s_fedora_atomic_v1/version.py b/magnum/drivers/k8s_fedora_atomic_v1/version.py deleted file mode 100644 index 2de5b8fc..00000000 --- a/magnum/drivers/k8s_fedora_atomic_v1/version.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 - Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version = '1.0.0' -driver = 'k8s_fedora_atomic_v1' -container_version = '1.12.6' diff --git a/magnum/drivers/k8s_fedora_ironic_v1/__init__.py b/magnum/drivers/k8s_fedora_ironic_v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/drivers/k8s_fedora_ironic_v1/driver.py b/magnum/drivers/k8s_fedora_ironic_v1/driver.py deleted file mode 100644 index 7c018ea1..00000000 --- a/magnum/drivers/k8s_fedora_ironic_v1/driver.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from magnum.drivers.common import k8s_monitor -from magnum.drivers.common.k8s_scale_manager import K8sScaleManager -from magnum.drivers.heat import driver -from magnum.drivers.k8s_fedora_ironic_v1 import template_def - - -class Driver(driver.HeatDriver): - - @property - def provides(self): - return [ - {'server_type': 'bm', - 'os': 'fedora', - 'coe': 'kubernetes'}, - ] - - def get_template_definition(self): - return template_def.FedoraK8sIronicTemplateDefinition() - - def get_monitor(self, context, cluster): - return k8s_monitor.K8sMonitor(context, cluster) - - def get_scale_manager(self, context, osclient, cluster): - return K8sScaleManager(context, osclient, cluster) diff --git a/magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/Readme.md b/magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/Readme.md deleted file mode 100644 index 760f7747..00000000 --- a/magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/Readme.md +++ /dev/null @@ -1,45 +0,0 @@ -Kubernetes elements -=================== - -This directory contains [diskimage-builder](https://github.com/openstack/diskimage-builder) -elements to build an image containing the kubernetes components required to use kubecluster-fedora-ironic.yaml. - -An example fedora-based image can be built and uploaded to glance with the following: - - # Install diskimage-builder in virtual environment - virtualenv . - . bin/activate - pip install diskimage-builder - git clone https://git.openstack.org/openstack/magnum - git clone https://git.openstack.org/openstack/dib-utils.git - export PATH="${PWD}/dib-utils/bin:$PATH" - export ELEMENTS_PATH=$(python -c 'import os, diskimage_builder, pkg_resources;print(os.path.abspath(pkg_resources.resource_filename(diskimage_builder.__name__, "elements")))') - export ELEMENTS_PATH=${ELEMENTS_PATH}:magnum/magnum/drivers/k8s_fedora_ironic_v1/image - export DIB_RELEASE=25 - disk-image-create baremetal \ - fedora selinux-permissive \ - kubernetes \ - -o fedora-25-kubernetes.qcow2 - - KERNEL_ID=`glance image-create --name fedora-k8s-kernel \ - --visibility public \ - --disk-format=aki \ - --container-format=aki \ - --file=fedora-25-kubernetes.vmlinuz \ - | grep id | tr -d '| ' | cut --bytes=3-57` - RAMDISK_ID=`glance image-create --name fedora-k8s-ramdisk \ - --visibility public \ - --disk-format=ari \ - --container-format=ari \ - --file=fedora-25-kubernetes.initrd \ - | grep id | tr -d '| ' | cut --bytes=3-57` - BASE_ID=`glance image-create --name fedora-k8s \ - --os-distro fedora \ - --visibility public \ - --disk-format=qcow2 \ - --container-format=bare \ - --property kernel_id=$KERNEL_ID \ - --property ramdisk_id=$RAMDISK_ID \ - --file=fedora-25-kubernetes.qcow2 \ - | grep -v kernel | grep -v ramdisk \ - | grep id | tr -d '| ' | cut --bytes=3-57` diff --git a/magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/elements-deps b/magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/elements-deps deleted file mode 100644 index 7076aba9..00000000 --- a/magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/elements-deps +++ /dev/null @@ -1 +0,0 @@ -package-installs diff --git a/magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/package-installs.yaml b/magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/package-installs.yaml deleted file mode 100644 index 28b29771..00000000 --- a/magnum/drivers/k8s_fedora_ironic_v1/image/kubernetes/package-installs.yaml +++ /dev/null @@ -1,5 +0,0 @@ -kubernetes: -etcd: -flannel: -docker-io: -python2: diff --git a/magnum/drivers/k8s_fedora_ironic_v1/template_def.py
b/magnum/drivers/k8s_fedora_ironic_v1/template_def.py deleted file mode 100644 index 6a43e523..00000000 --- a/magnum/drivers/k8s_fedora_ironic_v1/template_def.py +++ /dev/null @@ -1,75 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutronclient.common import exceptions as n_exception -from neutronclient.neutron import v2_0 as neutronV20 -import os - -from magnum.common import exception -from magnum.drivers.heat import k8s_fedora_template_def as kftd -from oslo_config import cfg - -CONF = cfg.CONF - - -class FedoraK8sIronicTemplateDefinition(kftd.K8sFedoraTemplateDefinition): - """Kubernetes template for a Fedora baremetal cluster.""" - - def __init__(self): - super(FedoraK8sIronicTemplateDefinition, self).__init__() - self.add_parameter('fixed_subnet', - cluster_template_attr='fixed_subnet', - param_type=str, - required=True) - - def get_fixed_network_id(self, osc, cluster_template): - try: - subnet = neutronV20.find_resource_by_name_or_id( - osc.neutron(), - 'subnet', - cluster_template.fixed_subnet - ) - except n_exception.NeutronException as e: - # NOTE(yuanying): NeutronCLIError doesn't have status_code. - # If the subnet name is duplicated, NeutronClientNoUniqueMatch - # (which is a kind of NeutronCLIError) will be raised. - if getattr(e, 'status_code', 400) < 500: - raise exception.InvalidSubnet(message=("%s" % e)) - else: - raise e - - if subnet['ip_version'] != 4: - raise exception.InvalidSubnet( - message="Subnet IP version should be 4" - ) - - return subnet['network_id'] - - def get_params(self, context, cluster_template, cluster, **kwargs): - ep = kwargs.pop('extra_params', {}) - - osc = self.get_osc(context) - ep['fixed_network'] = self.get_fixed_network_id(osc, cluster_template) - - return super(FedoraK8sIronicTemplateDefinition, - self).get_params(context, cluster_template, cluster, - extra_params=ep, - **kwargs) - - @property - def driver_module_path(self): - return __name__[:__name__.rindex('.')] - - @property - def template_path(self): - return os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'templates/kubecluster.yaml') diff --git a/magnum/drivers/k8s_fedora_ironic_v1/templates/kubecluster.yaml b/magnum/drivers/k8s_fedora_ironic_v1/templates/kubecluster.yaml deleted file mode 100644 index 29b75b87..00000000 --- a/magnum/drivers/k8s_fedora_ironic_v1/templates/kubecluster.yaml +++ /dev/null @@ -1,626 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This template will boot a Kubernetes cluster with one or more - minions (as specified by the number_of_minions parameter, which - defaults to 1).
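For illustration: Magnum normally instantiates this template itself through the Heat driver above, but a minimal python-heatclient sketch of launching it directly might look like the following. The endpoint, credentials, key and image names are placeholder assumptions, and the template's remaining required parameters, plus an environment mapping the Magnum::* resource types and the relative ../../common/templates references, would also be needed.

    # Minimal sketch, not Magnum code: create the stack with python-heatclient.
    from keystoneauth1.identity import v3
    from keystoneauth1 import session
    from heatclient import client as heat_client

    auth = v3.Password(auth_url='http://controller:5000/v3',  # placeholder
                       username='demo', password='secret',    # placeholder
                       project_name='demo',
                       user_domain_id='default',
                       project_domain_id='default')
    heat = heat_client.Client('1', session=session.Session(auth=auth))

    with open('kubecluster.yaml') as f:
        template = f.read()

    # Parameter names come from the parameters section below; the other
    # required parameters (e.g. discovery_url, magnum_url) are omitted
    # here for brevity.
    heat.stacks.create(stack_name='k8s-baremetal',
                       template=template,
                       parameters={'ssh_key_name': 'default',     # placeholder
                                   'server_image': 'fedora-k8s',  # built as in Readme.md above
                                   'number_of_minions': 2})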
- -parameters: - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - default: public - - fixed_network: - type: string - description: > - name of the private network into which servers get deployed. - Important: the Load Balancer feature in Kubernetes requires that - the name for the fixed_network must be "private" for the - address lookup in Kubernetes to work properly - - fixed_subnet: - type: string - description: Subnetwork from which to allocate fixed addresses. - default: private-subnet - - server_image: - type: string - description: glance image used to boot the server - - master_flavor: - type: string - default: m1.small - description: flavor to use when booting the server - - minion_flavor: - type: string - default: baremetal - description: flavor to use when booting the server - - prometheus_monitoring: - type: boolean - default: false - description: > - whether or not to have the grafana-prometheus-cadvisor monitoring setup - - grafana_admin_passwd: - type: string - default: admin - hidden: true - description: > - admin user password for the Grafana monitoring interface - - dns_nameserver: - type: string - description: address of a dns nameserver reachable in your environment - default: 8.8.8.8 - - number_of_masters: - type: number - description: how many kubernetes masters to spawn - default: 1 - - number_of_minions: - type: number - description: how many kubernetes minions to spawn - default: 1 - - portal_network_cidr: - type: string - description: > - address range used by kubernetes for service portals - default: 10.254.0.0/16 - - network_driver: - type: string - description: network driver to use for instantiating container networks - default: flannel - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - default: 10.100.0.0/16 - - flannel_network_subnetlen: - type: number - description: size of subnet assigned to each minion - default: 24 - - flannel_backend: - type: string - description: > - specify the backend for flannel; the default is the udp backend - default: "udp" - constraints: - - allowed_values: ["udp", "vxlan", "host-gw"] - - system_pods_initial_delay: - type: number - description: > - health check, time to wait for system pods (podmaster, scheduler) to boot - (in seconds) - default: 30 - - system_pods_timeout: - type: number - description: > - health check, timeout for system pods (podmaster, scheduler) to answer. - (in seconds) - default: 5 - - admission_control_list: - type: string - description: > - List of admission control plugins to activate - default: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers.
- default: "true" - constraints: - - allowed_values: ["true", "false"] - - etcd_volume_size: - type: number - description: > - size of the cinder volume for etcd storage - default: 0 - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - default: 0 - - docker_volume_type: - type: string - description: > - type of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - default: "devicemapper" - constraints: - - allowed_values: ["devicemapper", "overlay"] - - wait_condition_timeout: - type: number - description: > - timeout for the Wait Conditions - default: 6000 - - minions_to_remove: - type: comma_delimited_list - description: > - List of minions to be removed when doing an update. Individual minion may - be referenced several ways: (1) The resource name (e.g. ['1', '3']), - (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should - be empty when doing an create. - default: [] - - discovery_url: - type: string - description: > - Discovery URL used for bootstrapping the etcd cluster. - - registry_enabled: - type: boolean - description: > - Indicates whether the docker registry is enabled. - default: false - - registry_port: - type: number - description: port of registry service - default: 5000 - - swift_region: - type: string - description: region of swift service - default: "" - - registry_container: - type: string - description: > - name of swift container which docker registry stores images in - default: "container" - - registry_insecure: - type: boolean - description: > - indicates whether to skip TLS verification between registry and backend storage - default: true - - registry_chunksize: - type: number - description: > - size fo the data segments for the swift dynamic large objects - default: 5242880 - - volume_driver: - type: string - description: volume driver to use for container storage - default: "" - - region_name: - type: string - description: A logically separate section of the cluster - - username: - type: string - description: > - user account - - password: - type: string - description: > - user password, not set in current implementation, only used to - fill in for Kubernetes config file - default: - ChangeMe - hidden: true - - tenant_name: - type: string - description: > - tenant name - - loadbalancing_protocol: - type: string - description: > - The protocol which is used for load balancing. If you want to change - tls_disabled option to 'True', please change this to "HTTP". - default: TCP - constraints: - - allowed_values: ["TCP", "HTTP"] - - tls_disabled: - type: boolean - description: whether or not to disable TLS - default: False - - kube_dashboard_enabled: - type: boolean - description: whether or not to disable kubernetes dashboard - default: True - - kubernetes_port: - type: number - description: > - The port which are used by kube-apiserver to provide Kubernetes - service. 
- default: 6443 - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - http_proxy: - type: string - description: http proxy address for docker - default: "" - - https_proxy: - type: string - description: https proxy address for docker - default: "" - - no_proxy: - type: string - description: no proxies for docker - default: "" - - trustee_domain_id: - type: string - description: domain id of the trustee - default: "" - - trustee_user_id: - type: string - description: user id of the trustee - default: "" - - trustee_username: - type: string - description: username of the trustee - default: "" - - trustee_password: - type: string - description: password of the trustee - default: "" - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - default: "" - hidden: true - - auth_url: - type: string - description: url for keystone - - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - default: v1.5.3 - - kube_dashboard_version: - type: string - description: version of kubernetes dashboard used for kubernetes cluster - default: v1.5.1 - - insecure_registry_url: - type: string - description: insecure registry url - default: "" - -resources: - - api_lb: - type: ../../common/templates/lb.yaml - properties: - fixed_subnet: {get_param: fixed_subnet} - external_network: {get_param: external_network} - protocol: {get_param: loadbalancing_protocol} - port: {get_param: kubernetes_port} - - etcd_lb: - type: ../../common/templates/lb.yaml - properties: - fixed_subnet: {get_param: fixed_subnet} - external_network: {get_param: external_network} - protocol: {get_param: loadbalancing_protocol} - port: 2379 - - ###################################################################### - # - # security groups. we need to permit network traffic of various - # sorts. - # - - secgroup_base: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - secgroup_kube_master: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: tcp - port_range_min: 7080 - port_range_max: 7080 - - protocol: tcp - port_range_min: 8080 - port_range_max: 8080 - - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - - protocol: tcp - port_range_min: 6443 - port_range_max: 6443 - - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - - secgroup_kube_minion: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - - protocol: udp - - ###################################################################### - # - # resources that expose the IPs of either the kube master or a given - # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
- # - - api_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_attr: [api_lb, floating_address]} - pool_private_ip: {get_attr: [api_lb, address]} - master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} - master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} - - etcd_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_private_ip: {get_attr: [etcd_lb, address]} - master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} - - ###################################################################### - # - # resources that expose the IPs of either floating ip or a given - # fixed ip depending on whether FloatingIP is enabled for the cluster. - # - - api_address_floating_switch: - type: Magnum::FloatingIPAddressSwitcher - properties: - public_ip: {get_attr: [api_address_lb_switch, public_ip]} - private_ip: {get_attr: [api_address_lb_switch, private_ip]} - - ###################################################################### - # - # kubernetes masters. This is a resource group that will create - # masters. - # - - kube_masters: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: number_of_masters} - resource_def: - type: kubemaster.yaml - properties: - prometheus_monitoring: {get_param: prometheus_monitoring} - grafana_admin_passwd: {get_param: grafana_admin_passwd} - api_public_address: {get_attr: [api_lb, floating_address]} - api_private_address: {get_attr: [api_lb, address]} - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - master_flavor: {get_param: master_flavor} - external_network: {get_param: external_network} - kube_allow_priv: {get_param: kube_allow_priv} - docker_volume_size: {get_param: docker_volume_size} - docker_storage_driver: {get_param: docker_storage_driver} - network_driver: {get_param: network_driver} - flannel_network_cidr: {get_param: flannel_network_cidr} - flannel_network_subnetlen: {get_param: flannel_network_subnetlen} - flannel_backend: {get_param: flannel_backend} - system_pods_initial_delay: {get_param: system_pods_initial_delay} - system_pods_timeout: {get_param: system_pods_timeout} - portal_network_cidr: {get_param: portal_network_cidr} - admission_control_list: {get_param: admission_control_list} - discovery_url: {get_param: discovery_url} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} - fixed_network: {get_param: fixed_network} - fixed_subnet: {get_param: fixed_subnet} - api_pool_id: {get_attr: [api_lb, pool_id]} - etcd_pool_id: {get_attr: [etcd_lb, pool_id]} - username: {get_param: username} - password: {get_param: password} - tenant_name: {get_param: tenant_name} - kubernetes_port: {get_param: kubernetes_port} - tls_disabled: {get_param: tls_disabled} - kube_dashboard_enabled: {get_param: kube_dashboard_enabled} - secgroup_base_id: {get_resource: secgroup_base} - secgroup_kube_master_id: {get_resource: secgroup_kube_master} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - kube_version: {get_param: kube_version} - kube_dashboard_version: {get_param: kube_dashboard_version} - trustee_user_id: {get_param: trustee_user_id} - trustee_password: {get_param: trustee_password} - trust_id: {get_param: trust_id} - auth_url: {get_param: auth_url} - insecure_registry_url: {get_param: insecure_registry_url} - wc_curl_cli: {get_attr: [master_wait_handle, curl_cli]} - etcd_lb_vip: {get_attr: [etcd_lb, 
address]} - - ###################################################################### - # - # wait condition handler for kubernetes master - # - - master_wait_handle: - type: OS::Heat::WaitConditionHandle - - master_wait_condition: - type: OS::Heat::WaitCondition - properties: - count: {get_param: number_of_masters} - handle: {get_resource: master_wait_handle} - timeout: {get_param: wait_condition_timeout} - - ###################################################################### - # - # kubernetes minions. This is a resource group that will initially - # create minions, and needs to be manually scaled. - # - - kube_minions: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: number_of_minions} - removal_policies: [{resource_list: {get_param: minions_to_remove}}] - resource_def: - type: kubeminion.yaml - properties: - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - minion_flavor: {get_param: minion_flavor} - fixed_network: {get_param: fixed_network} - external_network: {get_param: external_network} - kube_software_configs: {get_attr: [kubeminion_software_configs, kube_minion_init]} - - ###################################################################### - # - # Software configs for kubernetes minions - # - - kubeminion_software_configs: - type: kubeminion_software_configs.yaml - properties: - prometheus_monitoring: {get_param: prometheus_monitoring} - network_driver: {get_param: network_driver} - kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]} - etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} - kube_allow_priv: {get_param: kube_allow_priv} - docker_volume_size: {get_param: docker_volume_size} - docker_storage_driver: {get_param: docker_storage_driver} - registry_enabled: {get_param: registry_enabled} - registry_port: {get_param: registry_port} - swift_region: {get_param: swift_region} - registry_container: {get_param: registry_container} - registry_insecure: {get_param: registry_insecure} - registry_chunksize: {get_param: registry_chunksize} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} - volume_driver: {get_param: volume_driver} - region_name: {get_param: region_name} - tenant_name: {get_param: tenant_name} - auth_url: {get_param: auth_url} - username: {get_param: username} - password: {get_param: password} - kubernetes_port: {get_param: kubernetes_port} - tls_disabled: {get_param: tls_disabled} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - kube_version: {get_param: kube_version} - trustee_user_id: {get_param: trustee_user_id} - trustee_username: {get_param: trustee_username} - trustee_password: {get_param: trustee_password} - trustee_domain_id: {get_param: trustee_domain_id} - trust_id: {get_param: trust_id} - insecure_registry_url: {get_param: insecure_registry_url} - wc_curl_cli: {get_attr: [minion_wait_handle, curl_cli]} - - ###################################################################### - # - # wait condition handler for kubernetes minions - # - - minion_wait_handle: - type: OS::Heat::WaitConditionHandle - - minion_wait_condition: - type: OS::Heat::WaitCondition - properties: - count: {get_param: number_of_minions} - handle: {get_resource: minion_wait_handle} - timeout: {get_param: wait_condition_timeout} - -outputs: - - api_address: - value: - str_replace: - template: api_ip_address - params: - api_ip_address: {get_attr: [api_address_floating_switch, ip_address]} - description: > - This is
the API endpoint of the Kubernetes cluster. Use this to access - the Kubernetes API. - - registry_address: - value: - str_replace: - template: localhost:port - params: - port: {get_param: registry_port} - description: - This is the URL of the docker registry server where you can store docker - images. - - kube_masters_private: - value: {get_attr: [kube_masters, kube_master_ip]} - description: > - This is a list of the "private" IP addresses of all the Kubernetes masters. - - kube_masters: - value: {get_attr: [kube_masters, kube_master_external_ip]} - description: > - This is a list of the "public" IP addresses of all the Kubernetes masters. - Use these IP addresses to log in to the Kubernetes masters via ssh. - - kube_minions_private: - value: {get_attr: [kube_minions, kube_minion_ip]} - description: > - This is a list of the "private" IP addresses of all the Kubernetes minions. - - kube_minions: - value: {get_attr: [kube_minions, kube_minion_external_ip]} - description: > - This is a list of the "public" IP addresses of all the Kubernetes minions. - Use these IP addresses to log in to the Kubernetes minions via ssh. diff --git a/magnum/drivers/k8s_fedora_ironic_v1/templates/kubemaster.yaml b/magnum/drivers/k8s_fedora_ironic_v1/templates/kubemaster.yaml deleted file mode 100644 index 3ca775bc..00000000 --- a/magnum/drivers/k8s_fedora_ironic_v1/templates/kubemaster.yaml +++ /dev/null @@ -1,511 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a single Kubernetes master. This stack is - included by a ResourceGroup resource in the parent template - (kubecluster.yaml). - -parameters: - - server_image: - type: string - description: glance image used to boot the server - - master_flavor: - type: string - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - portal_network_cidr: - type: string - description: > - address range used by kubernetes for service portals - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers. - constraints: - - allowed_values: ["true", "false"] - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - default: "devicemapper" - constraints: - - allowed_values: ["devicemapper", "overlay"] - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - - flannel_network_subnetlen: - type: number - description: size of subnet assigned to each master - - flannel_backend: - type: string - description: > - specify the backend for flannel; default is the udp backend - constraints: - - allowed_values: ["udp", "vxlan", "host-gw"] - - system_pods_initial_delay: - type: number - description: > - health check, time to wait for system pods (podmaster, scheduler) to boot - (in seconds) - default: 30 - - system_pods_timeout: - type: number - description: > - health check, timeout for system pods (podmaster, scheduler) to answer - (in seconds) - default: 5 - - admission_control_list: - type: string - description: > - List of admission control plugins to activate - - discovery_url: - type: string - description: > - Discovery URL used for bootstrapping the etcd cluster.
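- # (illustrative value only: typically a token URL from the public etcd
- # discovery service, e.g. https://discovery.etcd.io/<token>, which
- # Magnum obtains at cluster-create time when none is supplied)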
- - tls_disabled: - type: boolean - description: whether or not to enable TLS - - kube_dashboard_enabled: - type: boolean - description: whether or not to enable the kubernetes dashboard - - kubernetes_port: - type: number - description: > - The port which is used by kube-apiserver to provide Kubernetes - service. - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - prometheus_monitoring: - type: boolean - description: > - whether or not to have prometheus and grafana deployed - - grafana_admin_passwd: - type: string - hidden: true - description: > - admin user password for the Grafana monitoring interface - - api_public_address: - type: string - description: Public IP address of the Kubernetes master server. - default: "" - - api_private_address: - type: string - description: Private IP address of the Kubernetes master server. - default: "" - - fixed_network: - type: string - description: Network from which to allocate fixed addresses. - - fixed_subnet: - type: string - description: Subnet from which to allocate fixed addresses. - - network_driver: - type: string - description: network driver to use for instantiating container networks - - secgroup_base_id: - type: string - description: ID of the security group for base. - - secgroup_kube_master_id: - type: string - description: ID of the security group for kubernetes master. - - api_pool_id: - type: string - description: ID of the load balancer pool of k8s API server. - - etcd_pool_id: - type: string - description: ID of the load balancer pool of etcd server. - - auth_url: - type: string - description: > - url for kubernetes to authenticate - - username: - type: string - description: > - user account - - password: - type: string - description: > - user password - - tenant_name: - type: string - description: > - tenant name - - http_proxy: - type: string - description: http proxy address for docker - - https_proxy: - type: string - description: https proxy address for docker - - no_proxy: - type: string - description: no proxies for docker - - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - - kube_dashboard_version: - type: string - description: version of kubernetes dashboard used for kubernetes cluster - - trustee_user_id: - type: string - description: user id of the trustee - - trustee_password: - type: string - description: password of the trustee - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - hidden: true - - insecure_registry_url: - type: string - description: insecure registry url - - wc_curl_cli: - type: string - description: > - Wait condition notify command for Master. - - etcd_lb_vip: - type: string - description: > - private vip of the etcd load balancer, used to generate certs on the master. - default: "" - -resources: - - ###################################################################### - # - # resource that exposes the IPs of either the kube master or the API - # LBaaS pool depending on whether LBaaS is enabled for the cluster. - # - - api_address_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_param: api_public_address} - pool_private_ip: {get_param: api_private_address} - master_public_ip: '' - master_private_ip: ''
these are components that are combined into - # a multipart MIME user-data archive. - # - - write_heat_params: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.yaml} - params: - "$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring} - "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]} - "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]} - "$KUBE_API_PORT": {get_param: kubernetes_port} - "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} - "$DOCKER_VOLUME": 'None' - "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} - "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} - "$NETWORK_DRIVER": {get_param: network_driver} - "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} - "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} - "$FLANNEL_BACKEND": {get_param: flannel_backend} - "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} - "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} - "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} - "$ADMISSION_CONTROL_LIST": {get_param: admission_control_list} - "$ETCD_DISCOVERY_URL": {get_param: discovery_url} - "$AUTH_URL": {get_param: auth_url} - "$USERNAME": {get_param: username} - "$PASSWORD": {get_param: password} - "$TENANT_NAME": {get_param: tenant_name} - "$CLUSTER_SUBNET": {get_param: fixed_subnet} - "$TLS_DISABLED": {get_param: tls_disabled} - "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled} - "$CLUSTER_UUID": {get_param: cluster_uuid} - "$MAGNUM_URL": {get_param: magnum_url} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$KUBE_VERSION": {get_param: kube_version} - "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version} - "$WAIT_CURL": {get_param: wc_curl_cli} - "$TRUSTEE_USER_ID": {get_param: trustee_user_id} - "$TRUSTEE_PASSWORD": {get_param: trustee_password} - "$TRUST_ID": {get_param: trust_id} - "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} - "$ENABLE_CINDER": "False" - "$ETCD_LB_VIP": {get_param: etcd_lb_vip} - - make_cert: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/make-cert.sh} - - configure_docker_storage: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - params: - $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} - template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} - - configure_etcd: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh} - - write_kube_os_config: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh} - - configure_kubernetes: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-master.sh} - - write_network_config: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-network-config.sh} - - write_prometheus_configmap: - type: OS::Heat::SoftwareConfig - properties: - 
group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-prometheus-configmap.yaml} - - - write_prometheus_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-prometheus-service.yaml} - - write_grafana_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/kubernetes/fragments/write-grafana-service.yaml} - params: - "$ADMIN_PASSWD": {get_param: grafana_admin_passwd} - - enable_monitoring: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/kubernetes/fragments/enable-monitoring.sh} - params: - "$ADMIN_PASSWD": {get_param: grafana_admin_passwd} - - network_config_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/network-config-service.sh} - - enable_services: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh} - - network_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/network-service.sh} - - enable_kube_controller_manager_scheduler: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/enable-kube-controller-manager-scheduler.sh} - - kube_system_namespace_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/kube-system-namespace-service.sh} - - kube_ui_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh} - - enable_kube_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/enable-kube-proxy-master.sh} - - master_wc_notify: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/wc-notify-master.sh} - - disable_selinux: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/disable-selinux.sh} - - add_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh} - - kube_master_init: - type: OS::Heat::MultipartMime - properties: - parts: - - config: {get_resource: disable_selinux} - - config: {get_resource: write_heat_params} - - config: {get_resource: configure_etcd} - - config: {get_resource: write_kube_os_config} - - config: {get_resource: make_cert} - - config: {get_resource: configure_docker_storage} - - config: {get_resource: configure_kubernetes} - - config: {get_resource: add_proxy} - - config: {get_resource: enable_services} - - config: {get_resource: write_network_config} - - config: {get_resource: write_prometheus_configmap} - - config: {get_resource: write_prometheus_service} - - config: {get_resource: write_grafana_service} - - config: {get_resource: network_config_service} - - config: {get_resource: network_service} - - config: {get_resource: kube_system_namespace_service} - - config: {get_resource: 
enable_kube_controller_manager_scheduler} - - config: {get_resource: enable_kube_proxy} - - config: {get_resource: kube_ui_service} - - config: {get_resource: enable_monitoring} - - config: {get_resource: master_wc_notify} - - ###################################################################### - # - # a single kubernetes master. - # - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - kube-master: - type: OS::Nova::Server - properties: - image: {get_param: server_image} - flavor: {get_param: master_flavor} - key_name: {get_param: ssh_key_name} - user_data_format: RAW - user_data: {get_resource: kube_master_init} - networks: - - network: {get_param: fixed_network} - - kube_master_floating: - type: OS::Neutron::FloatingIP - properties: - floating_network: - get_param: external_network - port_id: - get_attr: [kube-master, addresses, {get_param: fixed_network}, 0, port] - - api_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: api_pool_id} - address: {get_attr: [kube-master, networks, private, 0]} - subnet: { get_param: fixed_subnet } - protocol_port: {get_param: kubernetes_port} - - etcd_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: etcd_pool_id} - address: {get_attr: [kube-master, networks, private, 0]} - subnet: { get_param: fixed_subnet } - protocol_port: 2379 - -outputs: - - kube_master_ip: - value: {get_attr: [kube-master, networks, private, 0]} - description: > - This is the "private" IP address of the Kubernetes master node. - - kube_master_external_ip: - value: {get_attr: [kube_master_floating, floating_ip_address]} - description: > - This is the "public" IP address of the Kubernetes master node. diff --git a/magnum/drivers/k8s_fedora_ironic_v1/templates/kubeminion.yaml b/magnum/drivers/k8s_fedora_ironic_v1/templates/kubeminion.yaml deleted file mode 100644 index 4b3dc135..00000000 --- a/magnum/drivers/k8s_fedora_ironic_v1/templates/kubeminion.yaml +++ /dev/null @@ -1,79 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a single Kubernetes minion. This stack is - included by a ResourceGroup resource in the parent template - (kubecluster.yaml). - -parameters: - - server_image: - type: string - description: glance image used to boot the server - - minion_flavor: - type: string - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - fixed_network: - type: string - description: Network from which to allocate fixed addresses. - - kube_software_configs: - type: string - description: > - ID of the multipart mime. - -resources: - - ###################################################################### - # - # a single kubernetes minion.
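- # (its user-data is the multipart MIME archive assembled in
- # kubeminion_software_configs.yaml and passed in through the
- # kube_software_configs parameter)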
- # - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - kube-minion: - type: OS::Nova::Server - properties: - image: {get_param: server_image} - flavor: {get_param: minion_flavor} - key_name: {get_param: ssh_key_name} - user_data_format: RAW - user_data: {get_param: kube_software_configs} - networks: - - network: {get_param: fixed_network} - - kube_minion_floating: - type: OS::Neutron::FloatingIP - properties: - floating_network: - get_param: external_network - port_id: - get_attr: [kube-minion, addresses, {get_param: fixed_network}, 0, port] - -outputs: - - kube_minion_ip: - value: {get_attr: [kube-minion, networks, private, 0]} - description: > - This is the "private" IP address of the Kubernetes minion node. - - kube_minion_external_ip: - value: {get_attr: [kube_minion_floating, floating_ip_address]} - description: > - This is the "public" IP address of the Kubernetes minion node. - - OS::stack_id: - value: {get_param: "OS::stack_id"} - description: > - This is the ID of the stack created from this template. diff --git a/magnum/drivers/k8s_fedora_ironic_v1/templates/kubeminion_software_configs.yaml b/magnum/drivers/k8s_fedora_ironic_v1/templates/kubeminion_software_configs.yaml deleted file mode 100644 index f1955a71..00000000 --- a/magnum/drivers/k8s_fedora_ironic_v1/templates/kubeminion_software_configs.yaml +++ /dev/null @@ -1,331 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines software configs for Kubernetes minions. - -parameters: - - kube_allow_priv: - type: string - description: > - whether or not kubernetes should permit privileged containers. - constraints: - - allowed_values: ["true", "false"] - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - default: "devicemapper" - constraints: - - allowed_values: ["devicemapper", "overlay"] - - tls_disabled: - type: boolean - description: whether or not to enable TLS - - kubernetes_port: - type: number - description: > - The port which is used by kube-apiserver to provide Kubernetes - service. - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - prometheus_monitoring: - type: boolean - description: > - whether or not to have the node-exporter running on the node - - kube_master_ip: - type: string - description: IP address of the Kubernetes master server. - - etcd_server_ip: - type: string - description: IP address of the Etcd server. - - network_driver: - type: string - description: network driver to use for instantiating container networks - - registry_enabled: - type: boolean - description: > - Indicates whether the docker registry is enabled.
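- # (when enabled, each minion runs a local docker registry backed by the
- # swift container named by the registry_* parameters below; see
- # configure-docker-registry.sh)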
- - registry_port: - type: number - description: port of registry service - - swift_region: - type: string - description: region of swift service - - registry_container: - type: string - description: > - name of swift container which docker registry stores images in - - registry_insecure: - type: boolean - description: > - indicates whether to skip TLS verification between registry and backend storage - - registry_chunksize: - type: number - description: > - size of the data segments for the swift dynamic large objects - - volume_driver: - type: string - description: volume driver to use for container storage - - region_name: - type: string - description: A logically separate section of the cluster - - tenant_name: - type: string - description: an alternative term for a project - - username: - type: string - description: > - user account - - password: - type: string - description: > - user password, not set in current implementation, only used to - fill in for Kubernetes config file - hidden: true - - http_proxy: - type: string - description: http proxy address for docker - - https_proxy: - type: string - description: https proxy address for docker - - no_proxy: - type: string - description: no proxies for docker - - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - - trustee_domain_id: - type: string - description: domain id of the trustee - - trustee_user_id: - type: string - description: user id of the trustee - - trustee_username: - type: string - description: username of the trustee - - trustee_password: - type: string - description: password of the trustee - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - hidden: true - - auth_url: - type: string - description: > - url for keystone, must be v2 since k8s backend only supports v2 - at this point - - insecure_registry_url: - type: string - description: insecure registry url - - wc_curl_cli: - type: string - description: > - Wait condition notify command for Minion. - -resources: - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive.
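- # (each SoftwareConfig below becomes one MIME part; OS::Heat::MultipartMime
- # assembles the parts in the order listed under kube_minion_init and the
- # result is delivered to the instance as user-data)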
- # - - write_heat_params: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params.yaml} - params: - $PROMETHEUS_MONITORING: {get_param: prometheus_monitoring} - $KUBE_ALLOW_PRIV: {get_param: kube_allow_priv} - $KUBE_MASTER_IP: {get_param: kube_master_ip} - $KUBE_API_PORT: {get_param: kubernetes_port} - $ETCD_SERVER_IP: {get_param: etcd_server_ip} - $DOCKER_VOLUME: 'None' - $DOCKER_VOLUME_SIZE: {get_param: docker_volume_size} - $DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver} - $NETWORK_DRIVER: {get_param: network_driver} - $REGISTRY_ENABLED: {get_param: registry_enabled} - $REGISTRY_PORT: {get_param: registry_port} - $SWIFT_REGION: {get_param: swift_region} - $REGISTRY_CONTAINER: {get_param: registry_container} - $REGISTRY_INSECURE: {get_param: registry_insecure} - $REGISTRY_CHUNKSIZE: {get_param: registry_chunksize} - $TLS_DISABLED: {get_param: tls_disabled} - $CLUSTER_UUID: {get_param: cluster_uuid} - $MAGNUM_URL: {get_param: magnum_url} - $USERNAME: {get_param: username} - $PASSWORD: {get_param: password} - $VOLUME_DRIVER: {get_param: volume_driver} - $REGION_NAME: {get_param: region_name} - $TENANT_NAME: {get_param: tenant_name} - $HTTP_PROXY: {get_param: http_proxy} - $HTTPS_PROXY: {get_param: https_proxy} - $NO_PROXY: {get_param: no_proxy} - $KUBE_VERSION: {get_param: kube_version} - $WAIT_CURL: {get_param: wc_curl_cli} - $TRUSTEE_DOMAIN_ID: {get_param: trustee_domain_id} - $TRUSTEE_USER_ID: {get_param: trustee_user_id} - $TRUSTEE_USERNAME: {get_param: trustee_username} - $TRUSTEE_PASSWORD: {get_param: trustee_password} - $TRUST_ID: {get_param: trust_id} - $AUTH_URL: {get_param: auth_url} - $INSECURE_REGISTRY_URL: {get_param: insecure_registry_url} - $ENABLE_CINDER: "False" - - write_kubeconfig: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/write-kubeconfig.yaml} - - make_cert: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh} - - configure_docker_storage: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - params: - $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} - template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} - - configure_docker_registry: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/fragments/configure-docker-registry.sh} - - configure_kubernetes_minion: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-minion.sh} - - network_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/network-service.sh} - - enable_services: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-minion.sh} - - enable_docker_registry: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/fragments/enable-docker-registry.sh} - - enable_kube_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: 
../../common/templates/kubernetes/fragments/enable-kube-proxy-minion.sh} - - enable_node_exporter: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/enable-node-exporter.sh} - - minion_wc_notify: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: | - #!/bin/bash -v - wc_notify --data-binary '{"status": "SUCCESS"}' - params: - wc_notify: {get_param: wc_curl_cli} - - disable_selinux: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/disable-selinux.sh} - - add_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh} - - kube_minion_init: - type: OS::Heat::MultipartMime - properties: - parts: - - config: {get_resource: disable_selinux} - - config: {get_resource: write_heat_params} - - config: {get_resource: write_kubeconfig} - - config: {get_resource: make_cert} - - config: {get_resource: configure_docker_storage} - - config: {get_resource: configure_docker_registry} - - config: {get_resource: configure_kubernetes_minion} - - config: {get_resource: network_service} - - config: {get_resource: add_proxy} - - config: {get_resource: enable_services} - - config: {get_resource: enable_kube_proxy} - - config: {get_resource: enable_node_exporter} - - config: {get_resource: enable_docker_registry} - - config: {get_resource: minion_wc_notify} - -outputs: - kube_minion_init: - value: {get_resource: kube_minion_init} - description: ID of the multipart mime for kubeminion. diff --git a/magnum/drivers/k8s_fedora_ironic_v1/version.py b/magnum/drivers/k8s_fedora_ironic_v1/version.py deleted file mode 100644 index 785358af..00000000 --- a/magnum/drivers/k8s_fedora_ironic_v1/version.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 - Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version = '1.0.0' -driver = 'k8s_fedora_ironic_v1' -container_version = '1.12.6' diff --git a/magnum/drivers/mesos_ubuntu_v1/COPYING b/magnum/drivers/mesos_ubuntu_v1/COPYING deleted file mode 100644 index d6456956..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/magnum/drivers/mesos_ubuntu_v1/__init__.py b/magnum/drivers/mesos_ubuntu_v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/drivers/mesos_ubuntu_v1/driver.py b/magnum/drivers/mesos_ubuntu_v1/driver.py deleted file mode 100644 index c40f2395..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/driver.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.drivers.heat import driver -from magnum.drivers.mesos_ubuntu_v1 import monitor -from magnum.drivers.mesos_ubuntu_v1.scale_manager import MesosScaleManager -from magnum.drivers.mesos_ubuntu_v1 import template_def - - -class Driver(driver.HeatDriver): - - @property - def provides(self): - return [ - {'server_type': 'vm', - 'os': 'ubuntu', - 'coe': 'mesos'}, - ] - - def get_template_definition(self): - return template_def.UbuntuMesosTemplateDefinition() - - def get_monitor(self, context, cluster): - return monitor.MesosMonitor(context, cluster) - - def get_scale_manager(self, context, osclient, cluster): - return MesosScaleManager(context, osclient, cluster) diff --git a/magnum/drivers/mesos_ubuntu_v1/image/Dockerfile b/magnum/drivers/mesos_ubuntu_v1/image/Dockerfile deleted file mode 100644 index af17f456..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM ubuntu:trusty - -RUN \ - apt-get -yqq update && \ - apt-get -yqq install git qemu-utils python-dev python-pip python-yaml python-six uuid-runtime curl sudo kpartx parted wget && \ - pip install diskimage-builder && \ - mkdir /output - -WORKDIR /build - -ENV PATH="dib-utils/bin:$PATH" ELEMENTS_PATH="$(python -c 'import os, diskimage_builder, pkg_resources;print(os.path.abspath(pkg_resources.resource_filename(diskimage_builder.__name__, "elements")))'):tripleo-image-elements/elements:heat-templates/hot/software-config/elements:magnum/magnum/drivers/mesos_ubuntu_v1/image" DIB_RELEASE=trusty - -RUN git clone https://git.openstack.org/openstack/magnum -RUN git clone https://git.openstack.org/openstack/dib-utils.git -RUN git clone https://git.openstack.org/openstack/tripleo-image-elements.git -RUN git clone https://git.openstack.org/openstack/heat-templates.git - -CMD disk-image-create ubuntu vm docker mesos os-collect-config os-refresh-config os-apply-config heat-config heat-config-script -o /output/ubuntu-mesos.qcow2 diff --git a/magnum/drivers/mesos_ubuntu_v1/image/README.md b/magnum/drivers/mesos_ubuntu_v1/image/README.md deleted file mode 100644 index 9de8b236..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Mesos elements -============== - -See [Building an image](http://docs.openstack.org/developer/magnum/userguide.html#building-mesos-image) for instructions. 
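-
-For a quick local run of the same workflow (a sketch only: the
-`magnum/mesos-builder` tag and the `./output` mount are illustrative, and
-the container may need `--privileged` for loop-device access), the
-Dockerfile in this directory can be used directly:
-
-    docker build -t magnum/mesos-builder .
-    docker run --rm --privileged -v "$(pwd)/output:/output" magnum/mesos-builder
-
-The generated `ubuntu-mesos.qcow2` image appears in `./output`.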
diff --git a/magnum/drivers/mesos_ubuntu_v1/image/docker/elements-deps b/magnum/drivers/mesos_ubuntu_v1/image/docker/elements-deps deleted file mode 100644 index 7076aba9..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/docker/elements-deps +++ /dev/null @@ -1 +0,0 @@ -package-installs diff --git a/magnum/drivers/mesos_ubuntu_v1/image/docker/package-installs.yaml b/magnum/drivers/mesos_ubuntu_v1/image/docker/package-installs.yaml deleted file mode 100644 index cda53ec4..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/docker/package-installs.yaml +++ /dev/null @@ -1 +0,0 @@ -docker-engine: diff --git a/magnum/drivers/mesos_ubuntu_v1/image/docker/post-install.d/60-disable-docker-service b/magnum/drivers/mesos_ubuntu_v1/image/docker/post-install.d/60-disable-docker-service deleted file mode 100755 index a6026168..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/docker/post-install.d/60-disable-docker-service +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -service docker stop -[ -f /etc/init/docker.conf ] && echo "manual" > /etc/init/docker.override diff --git a/magnum/drivers/mesos_ubuntu_v1/image/docker/pre-install.d/10-add-docker-repo b/magnum/drivers/mesos_ubuntu_v1/image/docker/pre-install.d/10-add-docker-repo deleted file mode 100755 index 67cd9708..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/docker/pre-install.d/10-add-docker-repo +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then - set -x -fi -set -eu -set -o pipefail - -apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 \ - --recv-keys 58118E89F3A912897C070ADBF76221572C52609D - -DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]') -RELEASE=$(lsb_release -ics | tail -1 | tr '[:upper:]' '[:lower:]') - -# Add the repository -echo "deb http://apt.dockerproject.org/repo ${DISTRO}-${RELEASE} main" | \ - sudo tee /etc/apt/sources.list.d/docker.list diff --git a/magnum/drivers/mesos_ubuntu_v1/image/install_imagebuild_deps.sh b/magnum/drivers/mesos_ubuntu_v1/image/install_imagebuild_deps.sh deleted file mode 100755 index e1a3bd91..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/install_imagebuild_deps.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -# This script installs all needed dependencies to generate -# images using diskimage-builder. 
Please note that it has only been -# tested on Ubuntu Trusty - -set -eux -set -o pipefail - -sudo apt-get update || true -sudo apt-get install -y \ - git \ - qemu-utils \ - python-dev \ - python-yaml \ - python-six \ - uuid-runtime \ - curl \ - sudo \ - kpartx \ - parted \ - wget diff --git a/magnum/drivers/mesos_ubuntu_v1/image/mesos/elements-deps b/magnum/drivers/mesos_ubuntu_v1/image/mesos/elements-deps deleted file mode 100644 index 7076aba9..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/mesos/elements-deps +++ /dev/null @@ -1 +0,0 @@ -package-installs diff --git a/magnum/drivers/mesos_ubuntu_v1/image/mesos/package-installs.yaml b/magnum/drivers/mesos_ubuntu_v1/image/mesos/package-installs.yaml deleted file mode 100644 index 89756498..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/mesos/package-installs.yaml +++ /dev/null @@ -1,3 +0,0 @@ -zookeeperd: -mesos: -marathon: diff --git a/magnum/drivers/mesos_ubuntu_v1/image/mesos/post-install.d/60-disable-upstart b/magnum/drivers/mesos_ubuntu_v1/image/mesos/post-install.d/60-disable-upstart deleted file mode 100755 index 199dc6e0..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/mesos/post-install.d/60-disable-upstart +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -for service in zookeeper mesos-slave mesos-master marathon; do - service $service stop - [ -f /etc/init/$service.conf ] && echo "manual" > /etc/init/$service.override -done diff --git a/magnum/drivers/mesos_ubuntu_v1/image/mesos/pre-install.d/10-apt-repo b/magnum/drivers/mesos_ubuntu_v1/image/mesos/pre-install.d/10-apt-repo deleted file mode 100755 index 0c103372..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/mesos/pre-install.d/10-apt-repo +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then - set -x -fi -set -eu -set -o pipefail - -sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF -DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]') -CODENAME=$(lsb_release -cs) - -# Add the repository -echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \ - sudo tee /etc/apt/sources.list.d/mesosphere.list - -# Install Java 8 requirements for marathon -sudo add-apt-repository -y ppa:openjdk-r/ppa -sudo apt-get -y update -sudo apt-get -y install openjdk-8-jre-headless diff --git a/magnum/drivers/mesos_ubuntu_v1/image/validate_image.sh b/magnum/drivers/mesos_ubuntu_v1/image/validate_image.sh deleted file mode 100755 index 362ef299..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/image/validate_image.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -# check that image is valid -qemu-img check -q "$1" - -# validate estimated size -FILESIZE=$(stat -c%s "$1") -MIN_SIZE=681574400 # 650MB - -if [ $FILESIZE -lt $MIN_SIZE ] ; then - echo "Error: generated image size is lower than expected."
- exit 1 -fi diff --git a/magnum/drivers/mesos_ubuntu_v1/monitor.py b/magnum/drivers/mesos_ubuntu_v1/monitor.py deleted file mode 100644 index a520e2e1..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/monitor.py +++ /dev/null @@ -1,71 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_serialization import jsonutils - -from magnum.common import urlfetch -from magnum.conductor import monitors - - -class MesosMonitor(monitors.MonitorBase): - - def __init__(self, context, cluster): - super(MesosMonitor, self).__init__(context, cluster) - self.data = {} - - @property - def metrics_spec(self): - return { - 'memory_util': { - 'unit': '%', - 'func': 'compute_memory_util', - }, - 'cpu_util': { - 'unit': '%', - 'func': 'compute_cpu_util', - }, - } - - def _build_url(self, url, protocol='http', port='80', path='/'): - return protocol + '://' + url + ':' + port + path - - def _is_leader(self, state): - return state['leader'] == state['pid'] - - def pull_data(self): - self.data['mem_total'] = 0 - self.data['mem_used'] = 0 - self.data['cpu_total'] = 0 - self.data['cpu_used'] = 0 - for master_addr in self.cluster.master_addresses: - mesos_master_url = self._build_url(master_addr, port='5050', - path='/state') - master = jsonutils.loads(urlfetch.get(mesos_master_url)) - if self._is_leader(master): - for slave in master['slaves']: - self.data['mem_total'] += slave['resources']['mem'] - self.data['mem_used'] += slave['used_resources']['mem'] - self.data['cpu_total'] += slave['resources']['cpus'] - self.data['cpu_used'] += slave['used_resources']['cpus'] - break - - def compute_memory_util(self): - if self.data['mem_total'] == 0 or self.data['mem_used'] == 0: - return 0 - else: - return self.data['mem_used'] * 100 / self.data['mem_total'] - - def compute_cpu_util(self): - if self.data['cpu_total'] == 0 or self.data['cpu_used'] == 0: - return 0 - else: - return self.data['cpu_used'] * 100 / self.data['cpu_total'] diff --git a/magnum/drivers/mesos_ubuntu_v1/scale_manager.py b/magnum/drivers/mesos_ubuntu_v1/scale_manager.py deleted file mode 100644 index 4766c00a..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/scale_manager.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from marathon import MarathonClient - -from magnum.conductor.scale_manager import ScaleManager - - -class MesosScaleManager(ScaleManager): - """When scaling a mesos cluster, MesosScaleManager will inspect the - - nodes and find out those with containers on them. 
Thus we can - ask Heat to delete the nodes without containers. Note that this - is done on a best-effort basis -- Magnum doesn't have any synchronization - with Marathon, so while Magnum is checking for the containers to - choose nodes to remove, new containers can be deployed on the - nodes to be removed. - """ - - def __init__(self, context, osclient, cluster): - super(MesosScaleManager, self).__init__(context, osclient, cluster) - - def _get_hosts_with_container(self, context, cluster): - marathon_client = MarathonClient( - 'http://' + cluster.api_address + ':8080') - hosts = set() - for task in marathon_client.list_tasks(): - hosts.add(task.host) - - return hosts diff --git a/magnum/drivers/mesos_ubuntu_v1/template_def.py b/magnum/drivers/mesos_ubuntu_v1/template_def.py deleted file mode 100644 index 603b16ff..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/template_def.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os - -from magnum.drivers.heat import template_def - - -class UbuntuMesosTemplateDefinition(template_def.BaseTemplateDefinition): - """Mesos template for Ubuntu VM.""" - - def __init__(self): - super(UbuntuMesosTemplateDefinition, self).__init__() - self.add_parameter('external_network', - cluster_template_attr='external_network_id', - required=True) - self.add_parameter('fixed_network', - cluster_template_attr='fixed_network') - self.add_parameter('fixed_subnet', - cluster_template_attr='fixed_subnet') - self.add_parameter('number_of_slaves', - cluster_attr='node_count') - self.add_parameter('master_flavor', - cluster_template_attr='master_flavor_id') - self.add_parameter('slave_flavor', - cluster_template_attr='flavor_id') - self.add_parameter('cluster_name', - cluster_attr='name') - self.add_parameter('volume_driver', - cluster_template_attr='volume_driver') - - self.add_output('api_address', - cluster_attr='api_address') - self.add_output('mesos_master_private', - cluster_attr=None) - self.add_output('mesos_master', - cluster_attr='master_addresses') - self.add_output('mesos_slaves_private', - cluster_attr=None) - self.add_output('mesos_slaves', - cluster_attr='node_addresses') - - def get_params(self, context, cluster_template, cluster, **kwargs): - extra_params = kwargs.pop('extra_params', {}) - # HACK(apmelton) - This uses the user's bearer token; ideally - # it should be replaced with an actual trust token with only - # access to do what the template needs it to do.
-        osc = self.get_osc(context)
-        extra_params['auth_url'] = context.auth_url
-        extra_params['username'] = context.user_name
-        extra_params['tenant_name'] = context.tenant
-        extra_params['domain_name'] = context.domain_name
-        extra_params['region_name'] = osc.cinder_region_name()
-
-        label_list = ['rexray_preempt', 'mesos_slave_isolation',
-                      'mesos_slave_image_providers',
-                      'mesos_slave_work_dir',
-                      'mesos_slave_executor_env_variables']
-
-        for label in label_list:
-            extra_params[label] = cluster_template.labels.get(label)
-
-        scale_mgr = kwargs.pop('scale_manager', None)
-        if scale_mgr:
-            hosts = self.get_output('mesos_slaves_private')
-            extra_params['slaves_to_remove'] = (
-                scale_mgr.get_removal_nodes(hosts))
-
-        return super(UbuntuMesosTemplateDefinition,
-                     self).get_params(context, cluster_template, cluster,
-                                      extra_params=extra_params,
-                                      **kwargs)
-
-    def get_env_files(self, cluster_template, cluster):
-        env_files = []
-
-        template_def.add_priv_net_env_file(env_files, cluster_template)
-        template_def.add_lb_env_file(env_files, cluster_template)
-
-        return env_files
-
-    @property
-    def driver_module_path(self):
-        return __name__[:__name__.rindex('.')]
-
-    @property
-    def template_path(self):
-        return os.path.join(os.path.dirname(os.path.realpath(__file__)),
-                            'templates/mesoscluster.yaml')
diff --git a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/add-proxy.sh b/magnum/drivers/mesos_ubuntu_v1/templates/fragments/add-proxy.sh
deleted file mode 100644
index 87e85384..00000000
--- a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/add-proxy.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/sh
-
-. /etc/sysconfig/heat-params
-
-DOCKER_PROXY_CONF=/etc/default/docker
-BASH_RC=/etc/bash.bashrc
-
-if [ -n "$HTTP_PROXY" ]; then
-    echo "export http_proxy=$HTTP_PROXY" >> $DOCKER_PROXY_CONF
-
-    if [ -f "$BASH_RC" ]; then
-        echo "export http_proxy=$HTTP_PROXY" >> $BASH_RC
-    else
-        echo "File $BASH_RC does not exist, not setting http_proxy"
-    fi
-fi
-
-if [ -n "$HTTPS_PROXY" ]; then
-    echo "export https_proxy=$HTTPS_PROXY" >> $DOCKER_PROXY_CONF
-
-    if [ -f "$BASH_RC" ]; then
-        echo "export https_proxy=$HTTPS_PROXY" >> $BASH_RC
-    else
-        echo "File $BASH_RC does not exist, not setting https_proxy"
-    fi
-fi
-
-if [ -n "$HTTP_PROXY" -o -n "$HTTPS_PROXY" ]; then
-    service docker restart
-fi
-
-if [ -f "$BASH_RC" ]; then
-    if [ -n "$NO_PROXY" ]; then
-        echo "export no_proxy=$NO_PROXY" >> $BASH_RC
-    fi
-else
-    echo "File $BASH_RC does not exist, not setting no_proxy"
-fi
diff --git a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/configure-mesos-master.sh b/magnum/drivers/mesos_ubuntu_v1/templates/fragments/configure-mesos-master.sh
deleted file mode 100644
index 00607a60..00000000
--- a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/configure-mesos-master.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-
-. /etc/sysconfig/heat-params
-
-echo "Configuring mesos (master)"
-
-myip=$(ip addr show eth0 |
-       awk '$1 == "inet" {print $2}' | cut -f1 -d/)
-
-# Fix /etc/hosts
-sed -i "s/127.0.1.1/$myip/" /etc/hosts
-
-######################################################################
-#
-# Configure ZooKeeper
-#
-
-# List all ZooKeeper nodes
-id=1
-for master_ip in $MESOS_MASTERS_IPS; do
-    echo "server.$((id++))=${master_ip}:2888:3888" >> /etc/zookeeper/conf/zoo.cfg
-done
-
-# Set an ID for this node
-id=1
-for master_ip in $MESOS_MASTERS_IPS; do
-    if [ "$master_ip" = "$myip" ]; then
-        break
-    fi
-    id=$((id+1))
-done
-echo "$id" > /etc/zookeeper/conf/myid
-
-######################################################################
-#
-# Configure Mesos
-#
-
-# Set the ZooKeeper URL
-zk="zk://"
-for master_ip in $MESOS_MASTERS_IPS; do
-    zk="${zk}${master_ip}:2181,"
-done
-# Remove trailing ',' (format: zk://host1:port1,...,hostN:portN/path)
-zk=${zk::-1}
-echo "${zk}/mesos" > /etc/mesos/zk
-
-# The IP address to listen on
-echo "$myip" > /etc/mesos-master/ip
-
-# The size of the quorum of replicas
-echo "$QUORUM" > /etc/mesos-master/quorum
-
-# The hostname advertised in ZooKeeper
-echo "$myip" > /etc/mesos-master/hostname
-
-# The cluster name
-echo "$CLUSTER_NAME" > /etc/mesos-master/cluster
-
-######################################################################
-#
-# Configure Marathon
-#
-
-mkdir -p /etc/marathon/conf
-
-# Set the ZooKeeper URL
-echo "${zk}/mesos" > /etc/marathon/conf/master
-echo "${zk}/marathon" > /etc/marathon/conf/zk
-
-# Set the hostname advertised in ZooKeeper
-echo "$myip" > /etc/marathon/conf/hostname
diff --git a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/configure-mesos-slave.sh b/magnum/drivers/mesos_ubuntu_v1/templates/fragments/configure-mesos-slave.sh
deleted file mode 100644
index eeffc725..00000000
--- a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/configure-mesos-slave.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-
-. /etc/sysconfig/heat-params
-
-echo "Configuring mesos (slave)"
-
-myip=$(ip addr show eth0 |
-       awk '$1 == "inet" {print $2}' | cut -f1 -d/)
-
-zk=""
-for master_ip in $MESOS_MASTERS_IPS; do
-    zk="${zk}${master_ip}:2181,"
-done
-# Remove last ','
-zk=${zk::-1}
-# Zookeeper URL. This specifies how to connect to a quorum of masters
-# Format: zk://host1:port1,...,hostN:portN/path
-echo "zk://${zk}/mesos" > /etc/mesos/zk
-
-# The hostname the slave should report
-echo "$myip" > /etc/mesos-slave/hostname
-
-# The IP address to listen on
-echo "$myip" > /etc/mesos-slave/ip
-
-# List of containerizer implementations
-echo "docker,mesos" > /etc/mesos-slave/containerizers
-
-# Amount of time to wait for an executor to register
-cat > /etc/mesos-slave/executor_registration_timeout <<EOF
-$EXECUTOR_REGISTRATION_TIMEOUT
-EOF
-
-if [ -n "$ISOLATION" ]; then
-    echo "$ISOLATION" > /etc/mesos-slave/isolation
-fi
-
-if [ -n "$WORK_DIR" ]; then
-    echo "$WORK_DIR" > /etc/mesos-slave/work_dir
-fi
-
-if [ -n "$IMAGE_PROVIDERS" ]; then
-    if [ -n "$ISOLATION" ]; then
-        echo "$IMAGE_PROVIDERS" > /etc/mesos-slave/image_providers
-    else
-        echo "isolation doesn't exist, not setting image_providers"
-    fi
-fi
-
-if [ -n "$EXECUTOR_ENVIRONMENT_VARIABLES" ]; then
-    echo "$EXECUTOR_ENVIRONMENT_VARIABLES" > /etc/executor_environment_variables
-    echo "file:///etc/executor_environment_variables" > /etc/mesos-slave/executor_environment_variables
-fi
diff --git a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/start-services-master.sh b/magnum/drivers/mesos_ubuntu_v1/templates/fragments/start-services-master.sh
deleted file mode 100644
index a1043ed3..00000000
--- a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/start-services-master.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-# Start master services
-for service in zookeeper mesos-master marathon; do
-    echo "starting service $service"
-    service $service start
-    rm -f /etc/init/$service.override
-done
diff --git a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/start-services-slave.sh b/magnum/drivers/mesos_ubuntu_v1/templates/fragments/start-services-slave.sh
deleted file mode 100644
index d3bee134..00000000
--- a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/start-services-slave.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-# Start slave services
-for service in docker mesos-slave; do
-    echo "starting service $service"
-    service $service start
-    rm -f /etc/init/$service.override
-done
diff --git a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/volume-service.sh b/magnum/drivers/mesos_ubuntu_v1/templates/fragments/volume-service.sh
deleted file mode 100644
index 0f19d916..00000000
--- a/magnum/drivers/mesos_ubuntu_v1/templates/fragments/volume-service.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh
-. /etc/sysconfig/heat-params
-
-# Judge whether to install the rexray driver
-if [ "$VOLUME_DRIVER" != "rexray" ]; then
-    exit 0
-fi
-
-# NOTE(yatin): "openstack" storageDriver is not supported in latest version
-# of rexray. So use stable version 0.3.3. Once it is supported by rexray:
-# http://rexray.readthedocs.io/en/stable/, we can revert this commit.
-curl -sSL https://dl.bintray.com/emccode/rexray/install | bash -s -- stable 0.3.3
-
-CLOUD_CONFIG=/etc/rexray/config.yml
-CLOUD=/etc/rexray
-
-if [ ! -f ${CLOUD_CONFIG} -o ! -d ${CLOUD} ]; then
-    mkdir -p $CLOUD
-fi
-
-if [ "${AUTH_URL##*/}" = "v3" ]; then
-    extra_configs="domainName: $DOMAIN_NAME"
-fi
-
-cat > $CLOUD_CONFIG <<EOF
...
-cat > /etc/sysconfig/heat-params <<EOF
...
diff --git a/magnum/drivers/mesos_ubuntu_v1/templates/mesos_slave_software_configs.yaml b/magnum/drivers/mesos_ubuntu_v1/templates/mesos_slave_software_configs.yaml
deleted file mode 100644
--- a/magnum/drivers/mesos_ubuntu_v1/templates/mesos_slave_software_configs.yaml
+++ /dev/null
-heat_template_version: 2014-10-16
-
-description: >
-  This is a nested stack that defines software configs for Mesos slave.
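The master and slave fragments above build the ZooKeeper connection string the same way: append each master IP with port 2181, strip the trailing comma, and prefix the zk:// scheme. A minimal Python sketch of that same logic (the port and /mesos path are the values hard-coded in the deleted scripts)::

    def build_zk_url(master_ips, port=2181, path="/mesos"):
        """Join masters into zk://host1:2181,...,hostN:2181/mesos."""
        hosts = ",".join("%s:%d" % (ip, port) for ip in master_ips)
        return "zk://%s%s" % (hosts, path)

    # build_zk_url(["10.0.0.3", "10.0.0.4"])
    # -> "zk://10.0.0.3:2181,10.0.0.4:2181/mesos"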
-
-parameters:
-
-  executor_registration_timeout:
-    type: string
-    description: >
-      Amount of time to wait for an executor to register with the slave before
-      considering it hung and shutting it down
-
-  http_proxy:
-    type: string
-    description: http proxy address for docker
-
-  https_proxy:
-    type: string
-    description: https proxy address for docker
-
-  no_proxy:
-    type: string
-    description: no proxies for docker
-
-  auth_url:
-    type: string
-    description: >
-      url for mesos to authenticate against before sending requests
-
-  username:
-    type: string
-    description: user name
-
-  password:
-    type: string
-    description: >
-      user password, not set in current implementation, only used to
-      fill in for Mesos config file
-    hidden: true
-
-  tenant_name:
-    type: string
-    description: >
-      tenant_name is used to isolate access to Compute resources
-
-  volume_driver:
-    type: string
-    description: volume driver to use for container storage
-
-  region_name:
-    type: string
-    description: A logically separate section of the cluster
-
-  domain_name:
-    type: string
-    description: >
-      domain is to define the administrative boundaries for management
-      of Keystone entities
-
-  rexray_preempt:
-    type: string
-    description: >
-      enables any host to take control of a volume irrespective of whether
-      other hosts are using the volume
-
-  mesos_slave_isolation:
-    type: string
-    description: >
-      Isolation mechanisms to use, e.g., `posix/cpu,posix/mem`, or
-      `cgroups/cpu,cgroups/mem`, or `network/port_mapping` (configure with
-      flag: `--with-network-isolator` to enable), or
-      `cgroups/devices/gpus/nvidia` for nvidia-specific gpu isolation
-      (configure with flag: `--enable-nvidia-gpu-support` to enable), or
-      `external`, or load an alternate isolator module using the `--modules`
-      flag. Note that this flag is only relevant for the Mesos Containerizer.
-
-  mesos_slave_work_dir:
-    type: string
-    description: directory path to place framework work directories
-
-  mesos_slave_image_providers:
-    type: string
-    description: >
-      Comma-separated list of supported image providers, e.g.,
-      APPC,DOCKER
-
-  mesos_slave_executor_env_variables:
-    type: string
-    description: >
-      JSON object representing the environment variables that should be passed
-      to the executor, and thus subsequently the task(s). By default the
-      executor will inherit the slave's environment variables.
-
-  mesos_masters_ips:
-    type: string
-    description: IP addresses of the Mesos master servers.
-
-  mesos_slave_wc_curl_cli:
-    type: string
-    description: Wait condition notify command for slave.
-
-resources:
-
-  ######################################################################
-  #
-  # Software configs. These are components that are combined into
-  # a multipart MIME user-data archive.
- # - - write_heat_params: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/write-heat-params.yaml} - params: - "$MESOS_MASTERS_IPS": {get_param: mesos_masters_ips} - "$EXECUTOR_REGISTRATION_TIMEOUT": {get_param: executor_registration_timeout} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$AUTH_URL": {get_param: auth_url} - "$USERNAME": {get_param: username} - "$PASSWORD": {get_param: password} - "$TENANT_NAME": {get_param: tenant_name} - "$VOLUME_DRIVER": {get_param: volume_driver} - "$REGION_NAME": {get_param: region_name} - "$DOMAIN_NAME": {get_param: domain_name} - "$REXRAY_PREEMPT": {get_param: rexray_preempt} - "$ISOLATION": {get_param: mesos_slave_isolation} - "$WORK_DIR": {get_param: mesos_slave_work_dir} - "$IMAGE_PROVIDERS": {get_param: mesos_slave_image_providers} - "$EXECUTOR_ENVIRONMENT_VARIABLES": {get_param: mesos_slave_executor_env_variables} - - configure_mesos_slave: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/configure-mesos-slave.sh} - - start_services: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/start-services-slave.sh} - - slave_wc_notify: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: | - #!/bin/bash -v - wc_notify --data-binary '{"status": "SUCCESS"}' - params: - wc_notify: {get_param: mesos_slave_wc_curl_cli} - - add_proxy: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/add-proxy.sh} - - volume_service: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/volume-service.sh} - - mesos_slave_init: - type: OS::Heat::MultipartMime - properties: - parts: - - config: {get_resource: write_heat_params} - - config: {get_resource: configure_mesos_slave} - - config: {get_resource: add_proxy} - - config: {get_resource: volume_service} - - config: {get_resource: start_services} - - config: {get_resource: slave_wc_notify} - -outputs: - mesos_init: - value: {get_resource: mesos_slave_init} - description: ID of the multipart mime. diff --git a/magnum/drivers/mesos_ubuntu_v1/templates/mesoscluster.yaml b/magnum/drivers/mesos_ubuntu_v1/templates/mesoscluster.yaml deleted file mode 100644 index 7e1d114f..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/templates/mesoscluster.yaml +++ /dev/null @@ -1,482 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This template will boot a Mesos cluster with one or more masters - (as specified by number_of_masters, default is 1) and one or more slaves - (as specified by the number_of_slaves parameter, which - defaults to 1). 
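configure-mesos-master.sh above writes a $QUORUM value into /etc/mesos-master/quorum, while the master deployment below only passes a NUMBER_OF_MASTERS input; whatever bridges the two has to derive a replica quorum, and Mesos requires that to be a strict majority of the masters. A hypothetical helper showing the usual sizing (not code from the deleted driver)::

    def master_quorum(number_of_masters):
        """Strict majority of master replicas: 1 -> 1, 3 -> 2, 5 -> 3."""
        if number_of_masters < 1:
            raise ValueError("a cluster needs at least one master")
        return number_of_masters // 2 + 1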
- -parameters: - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - default: public - - fixed_network: - type: string - description: uuid/name of an existing network to use to provision machines - default: "" - - fixed_subnet: - type: string - description: uuid/name of an existing subnet to use to provision machines - default: "" - - server_image: - type: string - default: ubuntu-mesos - description: glance image used to boot the server - - master_flavor: - type: string - default: m1.small - description: flavor to use when booting the master server - - slave_flavor: - type: string - default: m1.small - description: flavor to use when booting the slave server - - dns_nameserver: - type: string - description: address of a dns nameserver reachable in your environment - default: 8.8.8.8 - - number_of_slaves: - type: number - description: how many mesos slaves to spawn initially - default: 1 - - fixed_network_cidr: - type: string - description: network range for fixed ip network - default: 10.0.0.0/24 - - wait_condition_timeout: - type: number - description: > - timeout for the Wait Conditions - default: 6000 - - cluster_name: - type: string - description: human readable name for the mesos cluster - default: my-cluster - - executor_registration_timeout: - type: string - description: > - Amount of time to wait for an executor to register with the slave before - considering it hung and shutting it down - default: 5mins - - number_of_masters: - type: number - description: how many mesos masters to spawn initially - default: 1 - - http_proxy: - type: string - description: http proxy address for docker - default: "" - - https_proxy: - type: string - description: https proxy address for docker - default: "" - - no_proxy: - type: string - description: no proxies for docker - default: "" - - trustee_domain_id: - type: string - description: domain id of the trustee - default: "" - - trustee_user_id: - type: string - description: user id of the trustee - default: "" - - trustee_username: - type: string - description: username of the trustee - default: "" - - trustee_password: - type: string - description: password of the trustee - default: "" - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - default: "" - hidden: true - - region_name: - type: string - description: a logically separate section of the cluster - - username: - type: string - description: user name - - password: - type: string - description: > - user password, not set in current implementation, only used to - fill in for Mesos config file - default: - password - hidden: true - - tenant_name: - type: string - description: > - tenant_name is used to isolate access to Compute resources - - volume_driver: - type: string - description: volume driver to use for container storage - default: "" - - domain_name: - type: string - description: > - domain is to define the administrative boundaries for management - of Keystone entities - - rexray_preempt: - type: string - description: > - enables any host to take control of a volume irrespective of whether - other hosts are using the volume - default: "false" - - auth_url: - type: string - description: url for keystone - - mesos_slave_isolation: - type: string - description: > - Isolation mechanisms to use, e.g., `posix/cpu,posix/mem`, or - `cgroups/cpu,cgroups/mem`, or network/port_mapping 
(configure with flag: - `--with-network-isolator` to enable), or `cgroups/devices/gpus/nvidia` - for nvidia specific gpu isolation (configure with flag: `--enable-nvidia - -gpu-support` to enable), or `external`, or load an alternate isolator - module using the `--modules` flag. Note that this flag is only relevant - for the Mesos Containerizer. - default: "" - - mesos_slave_work_dir: - type: string - description: directory path to place framework work directories - default: "" - - mesos_slave_image_providers: - type: string - description: > - Comma separated list of supported image providers e.g., - APPC,DOCKER - default: "" - - mesos_slave_executor_env_variables: - type: string - description: > - JSON object representing the environment variables that should be passed - to the executor, and thus subsequently task(s). By default the executor, - executor will inherit the slave's environment variables. - default: "" - - slaves_to_remove: - type: comma_delimited_list - description: > - List of slaves to be removed when doing an update. Individual slave may - be referenced several ways: (1) The resource name (e.g.['1', '3']), - (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should - be empty when doing a create. - default: [] - -resources: - - ###################################################################### - # - # network resources. allocate a network and router for our server. - # - - network: - type: ../../common/templates/network.yaml - properties: - existing_network: {get_param: fixed_network} - existing_subnet: {get_param: fixed_subnet} - private_network_cidr: {get_param: fixed_network_cidr} - dns_nameserver: {get_param: dns_nameserver} - external_network: {get_param: external_network} - - api_lb: - type: ../../common/templates/lb.yaml - properties: - fixed_subnet: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - protocol: HTTP - port: 8080 - - ###################################################################### - # - # security groups. we need to permit network traffic of various - # sorts. - # - - secgroup_master: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - protocol: tcp - remote_mode: remote_group_id - - protocol: tcp - port_range_min: 5050 - port_range_max: 5050 - - protocol: tcp - port_range_min: 8080 - port_range_max: 8080 - - secgroup_slave_all_open: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - - protocol: udp - - ###################################################################### - # - # Master SoftwareConfig. 
- # - - write_params_master: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: fragments/write-heat-params-master.sh} - inputs: - - name: MESOS_MASTERS_IPS - type: String - - name: CLUSTER_NAME - type: String - - name: QUORUM - type: String - - name: HTTP_PROXY - type: String - - name: HTTPS_PROXY - type: String - - name: NO_PROXY - type: String - - configure_master: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: fragments/configure-mesos-master.sh} - - add_proxy_master: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: fragments/add-proxy.sh} - - start_services_master: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: fragments/start-services-master.sh} - - ###################################################################### - # - # Master SoftwareDeployment. - # - - write_params_master_deployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - config: {get_resource: write_params_master} - servers: {get_attr: [mesos_masters, attributes, mesos_server_id]} - input_values: - MESOS_MASTERS_IPS: {list_join: [' ', {get_attr: [mesos_masters, mesos_master_ip]}]} - CLUSTER_NAME: {get_param: cluster_name} - NUMBER_OF_MASTERS: {get_param: number_of_masters} - HTTP_PROXY: {get_param: http_proxy} - HTTPS_PROXY: {get_param: https_proxy} - NO_PROXY: {get_param: no_proxy} - - configure_master_deployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: - - write_params_master_deployment - properties: - config: {get_resource: configure_master} - servers: {get_attr: [mesos_masters, attributes, mesos_server_id]} - - add_proxy_master_deployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: - - configure_master_deployment - properties: - config: {get_resource: add_proxy_master} - servers: {get_attr: [mesos_masters, attributes, mesos_server_id]} - - start_services_master_deployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: - - add_proxy_master_deployment - properties: - config: {get_resource: start_services_master} - servers: {get_attr: [mesos_masters, attributes, mesos_server_id]} - - ###################################################################### - # - # resources that expose the IPs of either the mesos master or a given - # LBaaS pool depending on whether LBaaS is enabled for the bay. - # - - api_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_attr: [api_lb, floating_address]} - pool_private_ip: {get_attr: [api_lb, address]} - master_public_ip: {get_attr: [mesos_masters, resource.0.mesos_master_external_ip]} - master_private_ip: {get_attr: [mesos_masters, resource.0.mesos_master_ip]} - - ###################################################################### - # - # Mesos masters. This is a resource group that will create - # masters. 
- # - - mesos_masters: - type: OS::Heat::ResourceGroup - depends_on: - - network - properties: - count: {get_param: number_of_masters} - resource_def: - type: mesosmaster.yaml - properties: - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - master_flavor: {get_param: master_flavor} - external_network: {get_param: external_network} - fixed_network: {get_attr: [network, fixed_network]} - fixed_subnet: {get_attr: [network, fixed_subnet]} - secgroup_mesos_id: {get_resource: secgroup_master} - api_pool_id: {get_attr: [api_lb, pool_id]} - - ###################################################################### - # - # Mesos slaves. This is a resource group that will initially - # create slaves, and needs to be manually scaled. - # - - mesos_slaves: - type: OS::Heat::ResourceGroup - depends_on: - - network - properties: - count: {get_param: number_of_slaves} - removal_policies: [{resource_list: {get_param: slaves_to_remove}}] - resource_def: - type: mesosslave.yaml - properties: - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - slave_flavor: {get_param: slave_flavor} - fixed_network: {get_attr: [network, fixed_network]} - fixed_subnet: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - secgroup_slave_all_open_id: {get_resource: secgroup_slave_all_open} - mesos_slave_software_configs: {get_attr: [mesos_slave_software_configs, mesos_init]} - - ###################################################################### - # - # Wait condition handler for Mesos slaves. - # - - slave_wait_handle: - type: OS::Heat::WaitConditionHandle - - slave_wait_condition: - type: OS::Heat::WaitCondition - properties: - count: {get_param: number_of_slaves} - handle: {get_resource: slave_wait_handle} - timeout: {get_param: wait_condition_timeout} - - ###################################################################### - # - # Software configs for Mesos slaves. - # - - mesos_slave_software_configs: - type: mesos_slave_software_configs.yaml - properties: - mesos_masters_ips: {list_join: [' ', {get_attr: [mesos_masters, mesos_master_ip]}]} - executor_registration_timeout: {get_param: executor_registration_timeout} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - auth_url: {get_param: auth_url} - username: {get_param: username} - password: {get_param: password} - tenant_name: {get_param: tenant_name} - volume_driver: {get_param: volume_driver} - region_name: {get_param: region_name} - domain_name: {get_param: domain_name} - rexray_preempt: {get_param: rexray_preempt} - mesos_slave_isolation: {get_param: mesos_slave_isolation} - mesos_slave_work_dir: {get_param: mesos_slave_work_dir} - mesos_slave_image_providers: {get_param: mesos_slave_image_providers} - mesos_slave_executor_env_variables: {get_param: mesos_slave_executor_env_variables} - mesos_slave_wc_curl_cli: {get_attr: [slave_wait_handle, curl_cli]} - -outputs: - - api_address: - value: {get_attr: [api_address_lb_switch, public_ip]} - description: > - This is the API endpoint of the Mesos master. Use this to access - the Mesos API from outside the cluster. - - mesos_master_private: - value: {get_attr: [mesos_masters, mesos_master_ip]} - description: > - This is a list of the "private" addresses of all the Mesos masters. - - mesos_master: - value: {get_attr: [mesos_masters, mesos_master_external_ip]} - description: > - This is the "public" ip address of the Mesos master server. 
Use this address to - log in to the Mesos master via ssh or to access the Mesos API - from outside the cluster. - - mesos_slaves_private: - value: {get_attr: [mesos_slaves, mesos_slave_ip]} - description: > - This is a list of the "private" addresses of all the Mesos slaves. - - mesos_slaves: - value: {get_attr: [mesos_slaves, mesos_slave_external_ip]} - description: > - This is a list of the "public" addresses of all the Mesos slaves. diff --git a/magnum/drivers/mesos_ubuntu_v1/templates/mesosmaster.yaml b/magnum/drivers/mesos_ubuntu_v1/templates/mesosmaster.yaml deleted file mode 100644 index f2d636e3..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/templates/mesosmaster.yaml +++ /dev/null @@ -1,99 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a single Mesos master, This stack is - included by a ResourceGroup resource in the parent template - (mesoscluster.yaml). - -parameters: - - server_image: - type: string - description: glance image used to boot the server - - master_flavor: - type: string - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - fixed_network: - type: string - description: Network from which to allocate fixed addresses. - - fixed_subnet: - type: string - description: Subnet from which to allocate fixed addresses. - - secgroup_mesos_id: - type: string - description: ID of the security group for mesos master. - - api_pool_id: - type: string - description: ID of the load balancer pool of Marathon. - -resources: - - ###################################################################### - # - # Mesos master server. - # - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - mesos-master: - type: OS::Nova::Server - properties: - image: {get_param: server_image} - flavor: {get_param: master_flavor} - key_name: {get_param: ssh_key_name} - user_data_format: SOFTWARE_CONFIG - networks: - - port: {get_resource: mesos_master_eth0} - - mesos_master_eth0: - type: OS::Neutron::Port - properties: - network: {get_param: fixed_network} - security_groups: - - {get_param: secgroup_mesos_id} - fixed_ips: - - subnet: {get_param: fixed_subnet} - replacement_policy: AUTO - - mesos_master_floating: - type: OS::Neutron::FloatingIP - properties: - floating_network: {get_param: external_network} - port_id: {get_resource: mesos_master_eth0} - - api_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: api_pool_id} - address: {get_attr: [mesos_master_eth0, fixed_ips, 0, ip_address]} - subnet: { get_param: fixed_subnet } - protocol_port: 8080 - -outputs: - - mesos_master_ip: - value: {get_attr: [mesos_master_eth0, fixed_ips, 0, ip_address]} - description: > - This is the "private" address of the Mesos master node. - mesos_master_external_ip: - value: {get_attr: [mesos_master_floating, floating_ip_address]} - description: > - This is the "public" address of the Mesos master node. - mesos_server_id: - value: {get_resource: mesos-master} - description: > - This is the logical id of the Mesos master node. 
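The deleted MesosMonitor earlier in this diff consumes these master addresses: it queries each master's :5050/state endpoint and only trusts the instance whose pid matches the advertised leader. A standalone sketch of that flow using just the standard library (URL layout as in MesosMonitor._build_url)::

    import json
    import urllib.request

    def leader_memory_util(master_addresses):
        """Find the leading master and compute memory utilization."""
        for addr in master_addresses:
            url = "http://%s:5050/state" % addr
            with urllib.request.urlopen(url) as resp:
                state = json.load(resp)
            if state.get("leader") != state.get("pid"):
                continue  # not the elected leader; try the next master
            total = sum(s["resources"]["mem"] for s in state["slaves"])
            used = sum(s["used_resources"]["mem"] for s in state["slaves"])
            return 0 if total == 0 else used * 100.0 / total
        return 0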
diff --git a/magnum/drivers/mesos_ubuntu_v1/templates/mesosslave.yaml b/magnum/drivers/mesos_ubuntu_v1/templates/mesosslave.yaml deleted file mode 100644 index 43f1a5e6..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/templates/mesosslave.yaml +++ /dev/null @@ -1,88 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a single Mesos slave, This stack is - included by a ResourceGroup resource in the parent template - (mesoscluster.yaml). - -parameters: - - server_image: - type: string - description: glance image used to boot the server - - slave_flavor: - type: string - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - fixed_network: - type: string - description: Network from which to allocate fixed addresses. - - fixed_subnet: - type: string - description: Subnet from which to allocate fixed addresses. - - secgroup_slave_all_open_id: - type: string - description: ID of the security group for slave. - - mesos_slave_software_configs: - type: string - description: ID of the multipart mime. - -resources: - - ###################################################################### - # - # a single Mesos slave. - # - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - mesos-slave: - type: OS::Nova::Server - properties: - image: {get_param: server_image} - flavor: {get_param: slave_flavor} - key_name: {get_param: ssh_key_name} - user_data_format: RAW - user_data: {get_param: mesos_slave_software_configs} - networks: - - port: {get_resource: mesos_slave_eth0} - - mesos_slave_eth0: - type: OS::Neutron::Port - properties: - network: {get_param: fixed_network} - security_groups: - - get_param: secgroup_slave_all_open_id - fixed_ips: - - subnet: {get_param: fixed_subnet} - replacement_policy: AUTO - - mesos_slave_floating: - type: OS::Neutron::FloatingIP - properties: - floating_network: {get_param: external_network} - port_id: {get_resource: mesos_slave_eth0} - -outputs: - - mesos_slave_ip: - value: {get_attr: [mesos_slave_eth0, fixed_ips, 0, ip_address]} - description: > - This is the "private" address of the Mesos slave node. - mesos_slave_external_ip: - value: {get_attr: [mesos_slave_floating, floating_ip_address]} - description: > - This is the "public" address of the Mesos slave node. diff --git a/magnum/drivers/mesos_ubuntu_v1/version.py b/magnum/drivers/mesos_ubuntu_v1/version.py deleted file mode 100644 index 04edd790..00000000 --- a/magnum/drivers/mesos_ubuntu_v1/version.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 - Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -version = '1.0.0' -driver = 'mesos_ubuntu_v1' -container_version = '1.9.1' diff --git a/magnum/drivers/swarm_fedora_atomic_v1/__init__.py b/magnum/drivers/swarm_fedora_atomic_v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/drivers/swarm_fedora_atomic_v1/driver.py b/magnum/drivers/swarm_fedora_atomic_v1/driver.py deleted file mode 100644 index 29bf236b..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v1/driver.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.drivers.heat import driver -from magnum.drivers.swarm_fedora_atomic_v1 import monitor -from magnum.drivers.swarm_fedora_atomic_v1 import template_def - - -class Driver(driver.HeatDriver): - - @property - def provides(self): - return [ - {'server_type': 'vm', - 'os': 'fedora-atomic', - 'coe': 'swarm'}, - ] - - def get_template_definition(self): - return template_def.AtomicSwarmTemplateDefinition() - - def get_monitor(self, context, cluster): - return monitor.SwarmMonitor(context, cluster) diff --git a/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/Dockerfile b/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/Dockerfile deleted file mode 100644 index 7ae36af5..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM fedora:23 -MAINTAINER Ton Ngo "ton@us.ibm.com" -WORKDIR / -RUN dnf -y install openvswitch \ - openstack-neutron-ml2 \ - openstack-neutron-openvswitch \ - bridge-utils \ - git \ - && dnf clean all -RUN cd /opt \ - && git clone https://git.openstack.org/openstack/neutron \ - && cp neutron/etc/policy.json /etc/neutron/. \ - && rm -rf neutron \ - && dnf -y remove git -VOLUME /var/run/openvswitch -ADD run_openvswitch_neutron.sh /usr/bin/run_openvswitch_neutron.sh - -CMD ["/usr/bin/run_openvswitch_neutron.sh"] diff --git a/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/README.rst b/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/README.rst deleted file mode 100644 index b368cf0e..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/README.rst +++ /dev/null @@ -1,68 +0,0 @@ -=================== -Neutron Openvswitch -=================== - -This Dockerfile creates a Docker image based on Fedora 23 that runs -Openvswitch and the Neutron L2 agent for Openvswitch. This container -image is used by Magnum when a Swarm cluster is deployed with the -attribute:: - - --network-driver=kuryr - -Magnum deploys this container on each Swarm node along with the -Kuryr container to support Docker advanced networking based on -the `Container Networking Model -`_. - -To build the image, run this command in the same directory as the -Dockerfile:: - - docker build -t openstackmagnum/fedora23-neutron-ovs:testing . 
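The same build can also be scripted through the Docker SDK for Python instead of the CLI; a sketch, assuming the `docker` package is installed and you are already logged in to Docker Hub::

    import docker

    client = docker.from_env()
    # Build from the directory holding the Dockerfile shown above.
    image, _logs = client.images.build(
        path=".", tag="openstackmagnum/fedora23-neutron-ovs:testing")
    # Push the freshly built tag to Docker Hub.
    client.images.push("openstackmagnum/fedora23-neutron-ovs",
                       tag="testing")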
-
-This image is available on Docker Hub as::
-
-    openstackmagnum/fedora23-neutron-ovs:testing
-
-To update the image with a new build::
-
-    docker push openstackmagnum/fedora23-neutron-ovs:testing
-
-The 'testing' tag may be replaced with 'latest' or another tag as
-needed.
-
-This image is intended to run on the Fedora Atomic public image which
-by default does not have these packages installed. The common
-practice for Atomic OS is to run new packages in containers rather
-than installing them in the OS.
-
-For the Neutron agent, you will need to provide 3 files at these
-locations:
-
-- /etc/neutron/neutron.conf
-- /etc/neutron/policy.json
-- /etc/neutron/plugins/ml2/ml2_conf.ini
-
-These files are typically installed in the same locations on the
-Neutron controller node. The policy.json file is copied into the
-Docker image because it is fairly static and does not require
-customization for the cluster. If it is changed in the Neutron master
-repo, you just need to rebuild the Docker image to update the file.
-Magnum will create the other 2 files on each cluster node in the
-directory /etc/kuryr and map them to the proper directories in
-the container using the Docker -v option.
-
-Since Openvswitch needs to operate on the host network namespace,
-the Docker container will need the --net=host option.
-The /var/run/openvswitch directory is also mapped to the cluster node
-so that the Kuryr container can talk to openvswitch.
-To run the image from Fedora Atomic::
-
-    docker run --net=host \
-               --cap-add=NET_ADMIN \
-               --privileged=true \
-               -v /var/run/openvswitch:/var/run/openvswitch \
-               -v /lib/modules:/lib/modules:ro \
-               -v /etc/kuryr/neutron.conf:/etc/neutron/neutron.conf \
-               -v /etc/kuryr/ml2_conf.ini:/etc/neutron/plugins/ml2/ml2_conf.ini \
-               --name openvswitch-agent \
-               openstackmagnum/fedora23-neutron-ovs:testing
diff --git a/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/run_openvswitch_neutron.sh b/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/run_openvswitch_neutron.sh
deleted file mode 100755
index 6893ccc8..00000000
--- a/magnum/drivers/swarm_fedora_atomic_v1/image/openvswitch/run_openvswitch_neutron.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-/usr/share/openvswitch/scripts/ovs-ctl start --system-id=random
-/usr/bin/neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --log-file /var/log/neutron/openvswitch-agent.log
diff --git a/magnum/drivers/swarm_fedora_atomic_v1/monitor.py b/magnum/drivers/swarm_fedora_atomic_v1/monitor.py
deleted file mode 100755
index bb6057f3..00000000
--- a/magnum/drivers/swarm_fedora_atomic_v1/monitor.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright 2015 Huawei Technologies Co.,LTD.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
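The `docker run` invocation in the README above translates almost one-for-one to the Docker SDK for Python; a sketch, again assuming the `docker` package is available::

    import docker

    client = docker.from_env()
    client.containers.run(
        "openstackmagnum/fedora23-neutron-ovs:testing",
        name="openvswitch-agent",
        network_mode="host",   # Open vSwitch must see the host namespace
        privileged=True,
        cap_add=["NET_ADMIN"],
        volumes={
            "/var/run/openvswitch": {"bind": "/var/run/openvswitch",
                                     "mode": "rw"},
            "/lib/modules": {"bind": "/lib/modules", "mode": "ro"},
            "/etc/kuryr/neutron.conf":
                {"bind": "/etc/neutron/neutron.conf", "mode": "rw"},
            "/etc/kuryr/ml2_conf.ini":
                {"bind": "/etc/neutron/plugins/ml2/ml2_conf.ini",
                 "mode": "rw"},
        },
        detach=True,
    )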
- -from oslo_log import log - -from magnum.common import docker_utils -from magnum.conductor import monitors - -LOG = log.getLogger(__name__) - - -class SwarmMonitor(monitors.MonitorBase): - - def __init__(self, context, cluster): - super(SwarmMonitor, self).__init__(context, cluster) - self.data = {} - self.data['nodes'] = [] - self.data['containers'] = [] - - @property - def metrics_spec(self): - return { - 'memory_util': { - 'unit': '%', - 'func': 'compute_memory_util', - }, - } - - def pull_data(self): - with docker_utils.docker_for_cluster(self.context, - self.cluster) as docker: - system_info = docker.info() - self.data['nodes'] = self._parse_node_info(system_info) - - # pull data from each container - containers = [] - for container in docker.containers(all=True): - try: - container = docker.inspect_container(container['Id']) - except Exception as e: - LOG.warning("Ignore error [%(e)s] when inspecting " - "container %(container_id)s.", - {'e': e, 'container_id': container['Id']}, - exc_info=True) - containers.append(container) - self.data['containers'] = containers - - def compute_memory_util(self): - mem_total = 0 - for node in self.data['nodes']: - mem_total += node['MemTotal'] - mem_reserved = 0 - for container in self.data['containers']: - mem_reserved += container['HostConfig']['Memory'] - - if mem_total == 0: - return 0 - else: - return mem_reserved * 100 / mem_total - - def _parse_node_info(self, system_info): - """Parse system_info to retrieve memory size of each node. - - :param system_info: The output returned by docker.info(). Example: - { - u'Debug': False, - u'NEventsListener': 0, - u'DriverStatus': [ - [u'\x08Strategy', u'spread'], - [u'\x08Filters', u'...'], - [u'\x08Nodes', u'2'], - [u'node1', u'10.0.0.4:2375'], - [u' \u2514 Containers', u'1'], - [u' \u2514 Reserved CPUs', u'0 / 1'], - [u' \u2514 Reserved Memory', u'0 B / 2.052 GiB'], - [u'node2', u'10.0.0.3:2375'], - [u' \u2514 Containers', u'2'], - [u' \u2514 Reserved CPUs', u'0 / 1'], - [u' \u2514 Reserved Memory', u'0 B / 2.052 GiB'] - ], - u'Containers': 3 - } - :return: Memory size of each node. Excample: - [{'MemTotal': 2203318222.848}, - {'MemTotal': 2203318222.848}] - """ - nodes = [] - for info in system_info['DriverStatus']: - key = info[0] - value = info[1] - if key == u' \u2514 Reserved Memory': - memory = value # Example: '0 B / 2.052 GiB' - memory = memory.split('/')[1].strip() # Example: '2.052 GiB' - memory = memory.split(' ')[0] # Example: '2.052' - memory = float(memory) * 1024 * 1024 * 1024 - nodes.append({'MemTotal': memory}) - return nodes diff --git a/magnum/drivers/swarm_fedora_atomic_v1/template_def.py b/magnum/drivers/swarm_fedora_atomic_v1/template_def.py deleted file mode 100644 index 13d71489..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v1/template_def.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
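SwarmMonitor._parse_node_info above recovers each node's memory size from strings like '0 B / 2.052 GiB', assuming the total is always reported in GiB. A standalone re-implementation sketch that also tolerates the other binary suffixes::

    UNITS = {"B": 1, "KiB": 1024, "MiB": 1024 ** 2,
             "GiB": 1024 ** 3, "TiB": 1024 ** 4}

    def parse_reserved_memory(value):
        """'0 B / 2.052 GiB' -> total bytes (the right-hand side)."""
        total = value.split("/")[1].strip()   # '2.052 GiB'
        amount, unit = total.split(" ")       # ('2.052', 'GiB')
        return float(amount) * UNITS[unit]

    assert parse_reserved_memory("0 B / 2.052 GiB") == 2.052 * 1024 ** 3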
-import os - -from magnum.drivers.heat import swarm_fedora_template_def as sftd - - -class AtomicSwarmTemplateDefinition(sftd.SwarmFedoraTemplateDefinition): - """Docker swarm template for a Fedora Atomic VM.""" - - @property - def driver_module_path(self): - return __name__[:__name__.rindex('.')] - - @property - def template_path(self): - return os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'templates/cluster.yaml') diff --git a/magnum/drivers/swarm_fedora_atomic_v1/templates/COPYING b/magnum/drivers/swarm_fedora_atomic_v1/templates/COPYING deleted file mode 100644 index d6456956..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v1/templates/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/magnum/drivers/swarm_fedora_atomic_v1/templates/README.md b/magnum/drivers/swarm_fedora_atomic_v1/templates/README.md deleted file mode 100644 index 5e45fdc8..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v1/templates/README.md +++ /dev/null @@ -1,107 +0,0 @@ -A Docker swarm cluster with Heat -============================== - -These [Heat][] templates will deploy an *N*-node [swarm][] cluster, -where *N* is the value of the `number_of_nodes` parameter you -specify when creating the stack. - -[heat]: https://wiki.openstack.org/wiki/Heat -[swarm]: https://github.com/docker/swarm/ - -## Requirements - -### OpenStack - -These templates will work with the Juno version of Heat. - -### Guest image - -These templates will work with either CentOS Atomic Host or Fedora 21 -Atomic. - -## Creating the stack - -First, you must create a swarm token, which is used to uniquely identify -the cluster to the global discovery service. This can be done by issuing -a create call to the swarm CLI. Alternatively, if you have access to -Docker you can use the dockerswarm/swarm image. - - $ swarm create - afeb445bcb2f573aeb8ff3a199785f45 - - $ docker run dockerswarm/swarm create - d8cdfe5128af6e1075b34aa06ff1cc2c - -Creating an environment file `local.yaml` with parameters specific to -your environment: - - parameters: - ssh_key_name: testkey - external_network: 028d70dd-67b8-4901-8bdd-0c62b06cce2d - dns_nameserver: 192.168.200.1 - server_image: fedora-atomic-latest - discovery_url: token://d8cdfe5128af6e1075b34aa06ff1cc2c - -And then create the stack, referencing that environment file: - - heat stack-create -f swarm.yaml -e local.yaml my-swarm-cluster - -You must provide values for: - -- `ssh_key_name` -- `external_network` -- `server_image` -- `discovery_url` - -## Interacting with Swarm - -The Docker CLI interacts with the cluster through the swarm master -listening on port 2376. - -You can get the ip address of the swarm master using the `heat -output-show` command: - - $ heat output-show my-swarm-cluster swarm_master - "192.168.200.86" - -Provide the Docker CLI with the address for the swarm master. 
- - $ docker -H tcp://192.168.200.86:2376 info - Containers: 4 - Nodes: 3 - swarm-master: 10.0.0.1:2375 - swarm-node1: 10.0.0.2:2375 - swarm-node2: 10.0.0.3:2375 - -## Testing - -You can test the swarm cluster with the Docker CLI by running a container. -In the example below, a container is spawned in the cluster to ping 8.8.8.8. - - $ docker -H tcp://192.168.200.86:2376 run -i cirros /bin/ping -c 4 8.8.8.8 - PING 8.8.8.8 (8.8.8.8): 56 data bytes - 64 bytes from 8.8.8.8: seq=0 ttl=127 time=40.749 ms - 64 bytes from 8.8.8.8: seq=1 ttl=127 time=46.264 ms - 64 bytes from 8.8.8.8: seq=2 ttl=127 time=42.808 ms - 64 bytes from 8.8.8.8: seq=3 ttl=127 time=42.270 ms - - --- 8.8.8.8 ping statistics --- - 4 packets transmitted, 4 packets received, 0% packet loss - round-trip min/avg/max = 40.749/43.022/46.264 ms - -## License - -Copyright 2014 Lars Kellogg-Stedman -Copyright 2015 Rackspace Hosting - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use these files except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/magnum/drivers/swarm_fedora_atomic_v1/templates/cluster.yaml b/magnum/drivers/swarm_fedora_atomic_v1/templates/cluster.yaml deleted file mode 100644 index bc1927c1..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v1/templates/cluster.yaml +++ /dev/null @@ -1,475 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This template will boot a Docker swarm cluster. A swarm cluster is made up - of several master nodes and N agent nodes. Every node in the cluster, - including the master, is running a Docker daemon and a swarm agent - advertising it to the cluster. The master is running an additional swarm - master container listening on port 2376. By default, the cluster is made - up of one master node and one agent node.
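# Editor's note (illustrative sketch, not part of the original template): a - # minimal environment file covering the parameters below that have no - # defaults might look like this, assuming the key, network, image and - # endpoint values exist in your cloud: - # - # parameters: - # ssh_key_name: testkey - # external_network: public - # server_image: fedora-atomic-latest - # discovery_url: token://<token from `swarm create`> - # cluster_uuid: <cluster identifier> - # magnum_url: <magnum endpoint> - # auth_url: <keystone endpoint> - # docker_volume_type: <cinder volume type>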
- -parameters: - - # - # REQUIRED PARAMETERS - # - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - fixed_network: - type: string - description: uuid/name of an existing network to use to provision machines - default: "" - - fixed_subnet: - type: string - description: uuid/name of an existing subnet to use to provision machines - default: "" - - discovery_url: - type: string - description: url provided for node discovery - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - server_image: - type: string - description: glance image used to boot the server - - # - # OPTIONAL PARAMETERS - # - master_flavor: - type: string - default: m1.small - description: flavor to use when booting the swarm master - - node_flavor: - type: string - default: m1.small - description: flavor to use when booting the swarm node - - dns_nameserver: - type: string - description: address of a dns nameserver reachable in your environment - default: 8.8.8.8 - - http_proxy: - type: string - description: http proxy address for docker - default: "" - - https_proxy: - type: string - description: https proxy address for docker - default: "" - - no_proxy: - type: string - description: no proxies for docker - default: "" - - number_of_masters: - type: number - description: how many swarm masters to spawn - default: 1 - - number_of_nodes: - type: number - description: how many swarm nodes to spawn - default: 1 - - fixed_network_cidr: - type: string - description: network range for fixed ip network - default: "10.0.0.0/24" - - tls_disabled: - type: boolean - description: whether or not to enable TLS - default: False - - network_driver: - type: string - description: network driver to use for instantiating container networks - default: None - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - default: 10.100.0.0/16 - - flannel_network_subnetlen: - type: number - description: size of subnet assigned to each master - default: 24 - - flannel_backend: - type: string - description: > - specify the backend for flannel; the default is the udp backend - default: "udp" - constraints: - - allowed_values: ["udp", "vxlan", "host-gw"] - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - default: 0 - - docker_volume_type: - type: string - description: > - type of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - default: "devicemapper" - constraints: - - allowed_values: ["devicemapper", "overlay"] - - loadbalancing_protocol: - type: string - description: > - The protocol which is used for load balancing. If you want to change - the tls_disabled option to 'True', please change this to "HTTP". - default: TCP - constraints: - - allowed_values: ["TCP", "HTTP"] - - swarm_port: - type: number - description: > - The port which is used by the swarm manager to provide the swarm service.
default: 2376 - - swarm_version: - type: string - description: version of swarm used for swarm cluster - default: 1.2.5 - - swarm_strategy: - type: string - description: > - schedule strategy to be used by swarm manager - default: "spread" - - trustee_domain_id: - type: string - description: domain id of the trustee - default: "" - - trustee_user_id: - type: string - description: user id of the trustee - default: "" - - trustee_username: - type: string - description: username of the trustee - default: "" - - trustee_password: - type: string - description: password of the trustee - default: "" - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - default: "" - hidden: true - - auth_url: - type: string - description: url for keystone - - registry_enabled: - type: boolean - description: > - Indicates whether the docker registry is enabled. - default: false - - registry_port: - type: number - description: port of registry service - default: 5000 - - swift_region: - type: string - description: region of swift service - default: "" - - registry_container: - type: string - description: > - name of swift container which docker registry stores images in - default: "container" - - registry_insecure: - type: boolean - description: > - indicates whether to skip TLS verification between registry and backend storage - default: true - - registry_chunksize: - type: number - description: > - size of the data segments for the swift dynamic large objects - default: 5242880 - - volume_driver: - type: string - description: volume driver to use for container storage - default: "" - constraints: - - allowed_values: ["","rexray"] - - rexray_preempt: - type: string - description: > - enables any host to take control of a volume irrespective of whether - other hosts are using the volume - default: "false" - - -resources: - - ###################################################################### - # - # network resources. allocate a network and router for our server. - # it would also be possible to take advantage of existing network - # resources (and have the deployer provide network and subnet ids, - # etc, as parameters), but I wanted to minimize the amount of - # configuration necessary to make this go. - - network: - type: ../../common/templates/network.yaml - properties: - existing_network: {get_param: fixed_network} - existing_subnet: {get_param: fixed_subnet} - private_network_cidr: {get_param: fixed_network_cidr} - dns_nameserver: {get_param: dns_nameserver} - external_network: {get_param: external_network} - - api_lb: - type: ../../common/templates/lb.yaml - properties: - fixed_subnet: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - protocol: {get_param: loadbalancing_protocol} - port: {get_param: swarm_port} - - etcd_lb: - type: ../../common/templates/lb.yaml - properties: - fixed_subnet: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - protocol: HTTP - port: 2379 - - ###################################################################### - # - # security groups. we need to permit network traffic of various - # sorts.
# - - secgroup_swarm_manager: - type: "OS::Neutron::SecurityGroup" - properties: - rules: - - protocol: icmp - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - protocol: tcp - port_range_min: 2376 - port_range_max: 2376 - - protocol: tcp - remote_ip_prefix: {get_param: fixed_network_cidr} - port_range_min: 1 - port_range_max: 65535 - - protocol: udp - port_range_min: 53 - port_range_max: 53 - - secgroup_swarm_node: - type: "OS::Neutron::SecurityGroup" - properties: - rules: - - protocol: icmp - - protocol: tcp - - protocol: udp - - ###################################################################### - # - # resources that expose the IPs of either the swarm master or a given - # LBaaS pool depending on whether LBaaS is enabled for the cluster. - # - - api_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_attr: [api_lb, floating_address]} - pool_private_ip: {get_attr: [api_lb, address]} - master_public_ip: {get_attr: [swarm_masters, resource.0.swarm_master_external_ip]} - master_private_ip: {get_attr: [swarm_masters, resource.0.swarm_master_ip]} - - etcd_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_private_ip: {get_attr: [etcd_lb, address]} - master_private_ip: {get_attr: [swarm_masters, resource.0.swarm_master_ip]} - - ###################################################################### - # - # Swarm manager is responsible for the entire cluster and manages the - # resources of multiple Docker hosts at scale. - # It supports high availability by creating a primary manager and multiple - # replica instances. - - swarm_masters: - type: "OS::Heat::ResourceGroup" - depends_on: - - network - properties: -
count: {get_param: number_of_masters} - resource_def: - type: swarmmaster.yaml - properties: - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - server_flavor: {get_param: master_flavor} - docker_volume_size: {get_param: docker_volume_size} - docker_volume_type: {get_param: docker_volume_type} - docker_storage_driver: {get_param: docker_storage_driver} - fixed_network_id: {get_attr: [network, fixed_network]} - fixed_subnet_id: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - discovery_url: {get_param: discovery_url} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - swarm_api_ip: {get_attr: [api_lb, address]} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} - tls_disabled: {get_param: tls_disabled} - secgroup_swarm_master_id: {get_resource: secgroup_swarm_manager} - network_driver: {get_param: network_driver} - flannel_network_cidr: {get_param: flannel_network_cidr} - flannel_network_subnetlen: {get_param: flannel_network_subnetlen} - flannel_backend: {get_param: flannel_backend} - swarm_port: {get_param: swarm_port} - api_pool_id: {get_attr: [api_lb, pool_id]} - etcd_pool_id: {get_attr: [etcd_lb, pool_id]} - etcd_server_ip: {get_attr: [etcd_lb, address]} - api_ip_address: {get_attr: [api_lb, floating_address]} - swarm_version: {get_param: swarm_version} - swarm_strategy: {get_param: swarm_strategy} - trustee_user_id: {get_param: trustee_user_id} - trustee_password: {get_param: trustee_password} - trust_id: {get_param: trust_id} - auth_url: {get_param: auth_url} - volume_driver: {get_param: volume_driver} - rexray_preempt: {get_param: rexray_preempt} - - swarm_nodes: - type: "OS::Heat::ResourceGroup" - depends_on: - - network - properties: - count: {get_param: number_of_nodes} - resource_def: - type: swarmnode.yaml - properties: - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - server_flavor: {get_param: node_flavor} - docker_volume_size: {get_param: docker_volume_size} - docker_volume_type: {get_param: docker_volume_type} - docker_storage_driver: {get_param: docker_storage_driver} - fixed_network_id: {get_attr: [network, fixed_network]} - fixed_subnet_id: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - swarm_api_ip: {get_attr: [api_address_lb_switch, private_ip]} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} - tls_disabled: {get_param: tls_disabled} - secgroup_swarm_node_id: {get_resource: secgroup_swarm_node} - flannel_network_cidr: {get_param: flannel_network_cidr} - network_driver: {get_param: network_driver} - etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} - api_ip_address: {get_attr: [api_address_lb_switch, public_ip]} - swarm_version: {get_param: swarm_version} - trustee_domain_id: {get_param: trustee_domain_id} - trustee_user_id: {get_param: trustee_user_id} - trustee_username: {get_param: trustee_username} - trustee_password: {get_param: trustee_password} - trust_id: {get_param: trust_id} - auth_url: {get_param: auth_url} - registry_enabled: {get_param: registry_enabled} - registry_port: {get_param: registry_port} - swift_region: {get_param: swift_region} - registry_container: {get_param: registry_container} - registry_insecure: {get_param: registry_insecure} - registry_chunksize: {get_param: registry_chunksize} - volume_driver: {get_param: volume_driver} - rexray_preempt: {get_param: rexray_preempt} - -outputs: - - api_address: - value: - str_replace: - template: api_ip_address - params: - api_ip_address: {get_attr: [api_address_lb_switch, public_ip]} - description: > - This is the API endpoint of the Swarm masters. Use this to access - the Swarm API server from outside the cluster. - - swarm_masters_private: - value: {get_attr: [swarm_masters, swarm_master_ip]} - description: > - This is a list of the "private" addresses of all the Swarm masters. - - swarm_masters: - value: {get_attr: [swarm_masters, swarm_master_external_ip]} - description: > - This is a list of "public" ip addresses of all the Swarm masters. - Use these addresses to log into the Swarm masters via ssh. - - swarm_nodes_private: - value: {get_attr: [swarm_nodes, swarm_node_ip]} - description: > - This is a list of the "private" addresses of all the Swarm nodes. - - swarm_nodes: - value: {get_attr: [swarm_nodes, swarm_node_external_ip]} - description: > - This is a list of the "public" addresses of all the Swarm nodes. Use - these addresses to, e.g., log into the nodes. - - discovery_url: - value: {get_param: discovery_url} - description: > - This is the discovery url for the Swarm cluster. diff --git a/magnum/drivers/swarm_fedora_atomic_v1/templates/swarmmaster.yaml b/magnum/drivers/swarm_fedora_atomic_v1/templates/swarmmaster.yaml deleted file mode 100644 index c319afb4..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v1/templates/swarmmaster.yaml +++ /dev/null @@ -1,476 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a swarm master node. A swarm master node is - running a Docker daemon and a swarm manager container listening on port 2376.
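# Editor's note (illustrative, not part of the original file): cluster.yaml - # above instantiates this nested template once per master through an - # OS::Heat::ResourceGroup, roughly: - # - # swarm_masters: - # type: OS::Heat::ResourceGroup - # properties: - # count: {get_param: number_of_masters} - # resource_def: - # type: swarmmaster.yaml - # properties: ... (full property list as in cluster.yaml above)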
- -parameters: - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - - docker_volume_type: - type: string - description: > - type of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - constraints: - - allowed_values: ["devicemapper", "overlay"] - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - discovery_url: - type: string - description: url provided for node discovery - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - fixed_network_id: - type: string - description: Network from which to allocate fixed addresses. - - fixed_subnet_id: - type: string - description: Subnet from which to allocate fixed addresses. - - swarm_api_ip: - type: string - description: swarm master's api server ip address - default: "" - - api_ip_address: - type: string - description: swarm master's api server public ip address - default: "" - - server_image: - type: string - description: glance image used to boot the server - - server_flavor: - type: string - description: flavor to use when booting the server - - http_proxy: - type: string - description: http proxy address for docker - - https_proxy: - type: string - description: https proxy address for docker - - no_proxy: - type: string - description: no proxies for docker - - tls_disabled: - type: boolean - description: whether or not to enable TLS - - network_driver: - type: string - description: network driver to use for instantiating container networks - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - - flannel_network_subnetlen: - type: number - description: size of subnet assigned to each master - - flannel_backend: - type: string - description: > - specify the backend for flannel; the default is the udp backend - constraints: - - allowed_values: ["udp", "vxlan", "host-gw"] - - swarm_version: - type: string - description: version of swarm used for swarm cluster - - swarm_strategy: - type: string - description: > - schedule strategy to be used by swarm manager - constraints: - - allowed_values: ["spread", "binpack", "random"] - - secgroup_swarm_master_id: - type: string - description: ID of the security group for swarm master. - - swarm_port: - type: number - description: > - The port which is used by the swarm manager to provide the swarm service. - - api_pool_id: - type: string - description: ID of the load balancer pool of swarm master server. - - etcd_pool_id: - type: string - description: ID of the load balancer pool of etcd server. - - etcd_server_ip: - type: string - description: ip address of the load balancer pool of etcd server.
- default: "" - - trustee_user_id: - type: string - description: user id of the trustee - - trustee_password: - type: string - description: password of the trustee - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - hidden: true - - auth_url: - type: string - description: url for keystone - - volume_driver: - type: string - description: volume driver to use for container storage - default: "" - - rexray_preempt: - type: string - description: > - enables any host to take control of a volume irrespective of whether - other hosts are using the volume - default: "false" - -resources: - - master_wait_handle: - type: "OS::Heat::WaitConditionHandle" - - master_wait_condition: - type: "OS::Heat::WaitCondition" - depends_on: swarm-master - properties: - handle: {get_resource: master_wait_handle} - timeout: 6000 - - ###################################################################### - # - # resource that exposes the IPs of either the Swarm master or the API - # LBaaS pool depending on whether LBaaS is enabled for the cluster. - # - - api_address_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_param: api_ip_address} - pool_private_ip: {get_param: swarm_api_ip} - master_public_ip: {get_attr: [swarm_master_floating, floating_ip_address]} - master_private_ip: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - - etcd_address_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_private_ip: {get_param: etcd_server_ip} - master_private_ip: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive. 
- # - write_heat_params: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/swarm/fragments/write-heat-params-master.yaml} - params: - "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} - "$DOCKER_VOLUME": {get_resource: docker_volume} - "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} - "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} - "$ETCD_DISCOVERY_URL": {get_param: discovery_url} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$SWARM_API_IP": {get_attr: [api_address_switch, private_ip]} - "$SWARM_NODE_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - "$CLUSTER_UUID": {get_param: cluster_uuid} - "$MAGNUM_URL": {get_param: magnum_url} - "$TLS_DISABLED": {get_param: tls_disabled} - "$NETWORK_DRIVER": {get_param: network_driver} - "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} - "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} - "$FLANNEL_BACKEND": {get_param: flannel_backend} - "$ETCD_SERVER_IP": {get_attr: [etcd_address_switch, private_ip]} - "$API_IP_ADDRESS": {get_attr: [api_address_switch, public_ip]} - "$SWARM_VERSION": {get_param: swarm_version} - "$TRUSTEE_USER_ID": {get_param: trustee_user_id} - "$TRUSTEE_PASSWORD": {get_param: trustee_password} - "$TRUST_ID": {get_param: trust_id} - "$AUTH_URL": {get_param: auth_url} - "$VOLUME_DRIVER": {get_param: volume_driver} - "$REXRAY_PREEMPT": {get_param: rexray_preempt} - - write_network_config: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/write-network-config.sh} - - network_config_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/network-config-service.sh} - - network_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/network-service.sh} - - configure_etcd: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/configure-etcd.sh} - - remove_docker_key: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/remove-docker-key.sh} - - configure_docker_storage: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - params: - $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} - template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} - - make_cert: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/make-cert.py} - - add_docker_daemon_options: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/add-docker-daemon-options.sh} - - write_swarm_manager_failure_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/swarm/fragments/write-cluster-failure-service.yaml} - params: - "$SERVICE": swarm-manager - "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} - - write_docker_socket: - type: "OS::Heat::SoftwareConfig" - properties: - group: 
ungrouped - config: {get_file: ../../common/templates/swarm/fragments/write-docker-socket.yaml} - - write_swarm_master_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/swarm/fragments/write-swarm-master-service.sh} - params: - "$ETCD_SERVER_IP": {get_attr: [etcd_address_switch, private_ip]} - "$NODE_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$TLS_DISABLED": {get_param: tls_disabled} - "$SWARM_VERSION": {get_param: swarm_version} - "$SWARM_STRATEGY": {get_param: swarm_strategy} - - enable_services: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/swarm/fragments/enable-services.sh} - params: - "$NODE_SERVICES": "etcd docker.socket docker swarm-manager" - - cfn_signal: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/cfn-signal.sh} - - configure_selinux: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/configure-selinux.sh} - - add_proxy: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/add-proxy.sh} - - volume_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/volume-service.sh} - - swarm_master_init: - type: "OS::Heat::MultipartMime" - properties: - parts: - - config: {get_resource: configure_selinux} - - config: {get_resource: remove_docker_key} - - config: {get_resource: write_heat_params} - - config: {get_resource: make_cert} - - config: {get_resource: configure_etcd} - - config: {get_resource: write_network_config} - - config: {get_resource: network_config_service} - - config: {get_resource: network_service} - - config: {get_resource: configure_docker_storage} - - config: {get_resource: write_swarm_manager_failure_service} - - config: {get_resource: add_docker_daemon_options} - - config: {get_resource: write_docker_socket} - - config: {get_resource: write_swarm_master_service} - - config: {get_resource: add_proxy} - - config: {get_resource: enable_services} - - config: {get_resource: cfn_signal} - - config: {get_resource: volume_service} - - ###################################################################### - # - # The swarm manager is a special node running the swarm manager daemon - # alongside the swarm agent.
# - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - swarm-master: - type: "OS::Nova::Server" - properties: - image: - get_param: server_image - flavor: - get_param: server_flavor - key_name: - get_param: ssh_key_name - user_data_format: RAW - user_data: {get_resource: swarm_master_init} - networks: - - port: - get_resource: swarm_master_eth0 - - swarm_master_eth0: - type: "OS::Neutron::Port" - properties: - network_id: - get_param: fixed_network_id - security_groups: - - {get_param: secgroup_swarm_master_id} - fixed_ips: - - subnet_id: - get_param: fixed_subnet_id - allowed_address_pairs: - - ip_address: {get_param: flannel_network_cidr} - - swarm_master_floating: - type: "OS::Neutron::FloatingIP" - properties: - floating_network: - get_param: external_network - port_id: - get_resource: swarm_master_eth0 - - api_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: api_pool_id} - address: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - subnet: { get_param: fixed_subnet_id } - protocol_port: {get_param: swarm_port} - - etcd_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: etcd_pool_id} - address: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - subnet: { get_param: fixed_subnet_id } - protocol_port: 2379 - - ###################################################################### - # - # docker storage. This allocates a cinder volume and attaches it - # to the node. - # - - docker_volume: - type: Magnum::Optional::Cinder::Volume - properties: - size: {get_param: docker_volume_size} - volume_type: {get_param: docker_volume_type} - - docker_volume_attach: - type: Magnum::Optional::Cinder::VolumeAttachment - properties: - instance_uuid: {get_resource: swarm-master} - volume_id: {get_resource: docker_volume} - mountpoint: /dev/vdb - -outputs: - - swarm_master_ip: - value: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - description: > - This is the "private" address of the Swarm master. - - swarm_master_external_ip: - value: {get_attr: [swarm_master_floating, floating_ip_address]} - description: > - This is the "public" ip address of the Swarm master. diff --git a/magnum/drivers/swarm_fedora_atomic_v1/templates/swarmnode.yaml b/magnum/drivers/swarm_fedora_atomic_v1/templates/swarmnode.yaml deleted file mode 100644 index 2acdab2b..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v1/templates/swarmnode.yaml +++ /dev/null @@ -1,417 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a single swarm node, - based on a vanilla Fedora 20 cloud image. This stack is included by - a ResourceGroup resource in the parent template (cluster.yaml).
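# Editor's note (illustrative, not part of the original file): like the master - # template, this stack pairs an OS::Heat::WaitConditionHandle with an - # OS::Heat::WaitCondition (see node_wait_handle/node_wait_condition below); - # the boot fragments signal completion back to Heat through the handle's - # curl CLI, along the lines of: - # - # $WAIT_CURL --data-binary '{"status": "SUCCESS", "reason": "..."}'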
- -parameters: - - server_image: - type: string - description: glance image used to boot the server - - server_flavor: - type: string - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - - docker_volume_type: - type: string - description: > - type of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - constraints: - - allowed_values: ["devicemapper", "overlay"] - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - fixed_network_id: - type: string - description: Network from which to allocate fixed addresses. - - fixed_subnet_id: - type: string - description: Subnet from which to allocate fixed addresses. - - network_driver: - type: string - description: network driver to use for instantiating container networks - - flannel_network_cidr: - type: string - description: network range for flannel overlay network - - http_proxy: - type: string - description: http proxy address for docker - - https_proxy: - type: string - description: https proxy address for docker - - no_proxy: - type: string - description: no proxies for docker - - swarm_api_ip: - type: string - description: swarm master's api server ip address - - api_ip_address: - type: string - description: swarm master's api server public ip address - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - tls_disabled: - type: boolean - description: whether or not to disable TLS - - swarm_version: - type: string - description: version of swarm used for swarm cluster - - secgroup_swarm_node_id: - type: string - description: ID of the security group for swarm node. - - etcd_server_ip: - type: string - description: ip address of the load balancer pool of etcd server. - - trustee_domain_id: - type: string - description: domain id of the trustee - - trustee_user_id: - type: string - description: user id of the trustee - - trustee_username: - type: string - description: username of the trustee - - trustee_password: - type: string - description: password of the trustee - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - hidden: true - - auth_url: - type: string - description: url for keystone - - registry_enabled: - type: boolean - description: > - Indicates whether the docker registry is enabled. 
- - registry_port: - type: number - description: port of registry service - - swift_region: - type: string - description: region of swift service - - registry_container: - type: string - description: > - name of swift container which docker registry stores images in - - registry_insecure: - type: boolean - description: > - indicates whether to skip TLS verification between registry and backend storage - - registry_chunksize: - type: number - description: > - size of the data segments for the swift dynamic large objects - - volume_driver: - type: string - description: volume driver to use for container storage - default: "" - - rexray_preempt: - type: string - description: > - enables any host to take control of a volume irrespective of whether - other hosts are using the volume - default: "false" - -resources: - - node_wait_handle: - type: "OS::Heat::WaitConditionHandle" - - node_wait_condition: - type: "OS::Heat::WaitCondition" - depends_on: swarm-node - properties: - handle: {get_resource: node_wait_handle} - timeout: 6000 - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive. - write_heat_params: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/swarm/fragments/write-heat-params-node.yaml} - params: - "$WAIT_CURL": {get_attr: [node_wait_handle, curl_cli]} - "$DOCKER_VOLUME": {get_resource: docker_volume} - "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} - "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$SWARM_API_IP": {get_param: swarm_api_ip} - "$SWARM_NODE_IP": {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]} - "$CLUSTER_UUID": {get_param: cluster_uuid} - "$MAGNUM_URL": {get_param: magnum_url} - "$TLS_DISABLED": {get_param: tls_disabled} - "$NETWORK_DRIVER": {get_param: network_driver} - "$ETCD_SERVER_IP": {get_param: etcd_server_ip} - "$API_IP_ADDRESS": {get_param: api_ip_address} - "$SWARM_VERSION": {get_param: swarm_version} - "$TRUSTEE_DOMAIN_ID": {get_param: trustee_domain_id} - "$TRUSTEE_USER_ID": {get_param: trustee_user_id} - "$TRUSTEE_USERNAME": {get_param: trustee_username} - "$TRUSTEE_PASSWORD": {get_param: trustee_password} - "$TRUST_ID": {get_param: trust_id} - "$AUTH_URL": {get_param: auth_url} - "$REGISTRY_ENABLED": {get_param: registry_enabled} - "$REGISTRY_PORT": {get_param: registry_port} - "$SWIFT_REGION": {get_param: swift_region} - "$REGISTRY_CONTAINER": {get_param: registry_container} - "$REGISTRY_INSECURE": {get_param: registry_insecure} - "$REGISTRY_CHUNKSIZE": {get_param: registry_chunksize} - "$VOLUME_DRIVER": {get_param: volume_driver} - "$REXRAY_PREEMPT": {get_param: rexray_preempt} - - remove_docker_key: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/remove-docker-key.sh} - - make_cert: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/make-cert.py} - - configure_docker_storage: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - params: - $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} - template: {get_file:
../../common/templates/fragments/configure-docker-storage.sh} - - configure_docker_registry: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/fragments/configure-docker-registry.sh} - - add_docker_daemon_options: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/add-docker-daemon-options.sh} - - write_docker_socket: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/write-docker-socket.yaml} - - network_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/network-service.sh} - - write_swarm_agent_failure_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/swarm/fragments/write-cluster-failure-service.yaml} - params: - "$SERVICE": swarm-agent - "$WAIT_CURL": {get_attr: [node_wait_handle, curl_cli]} - - write_swarm_agent_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/write-swarm-agent-service.sh} - - enable_docker_registry: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: ../../common/templates/fragments/enable-docker-registry.sh} - - enable_services: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/swarm/fragments/enable-services.sh} - params: - "$NODE_SERVICES": "docker.socket docker swarm-agent" - - cfn_signal: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/cfn-signal.sh} - - configure_selinux: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/configure-selinux.sh} - - add_proxy: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/add-proxy.sh} - - volume_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/volume-service.sh} - - swarm_node_init: - type: "OS::Heat::MultipartMime" - properties: - parts: - - config: {get_resource: configure_selinux} - - config: {get_resource: remove_docker_key} - - config: {get_resource: write_heat_params} - - config: {get_resource: make_cert} - - config: {get_resource: network_service} - - config: {get_resource: configure_docker_storage} - - config: {get_resource: configure_docker_registry} - - config: {get_resource: write_swarm_agent_failure_service} - - config: {get_resource: write_swarm_agent_service} - - config: {get_resource: add_docker_daemon_options} - - config: {get_resource: write_docker_socket} - - config: {get_resource: add_proxy} - - config: {get_resource: enable_docker_registry} - - config: {get_resource: enable_services} - - config: {get_resource: cfn_signal} - - config: {get_resource: volume_service} - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - swarm-node: - type: "OS::Nova::Server" - properties: - image: - get_param: server_image - flavor: - get_param: server_flavor - key_name: - get_param: 
ssh_key_name - user_data_format: RAW - user_data: {get_resource: swarm_node_init} - networks: - - port: - get_resource: swarm_node_eth0 - - swarm_node_eth0: - type: "OS::Neutron::Port" - properties: - network_id: - get_param: fixed_network_id - security_groups: - - {get_param: secgroup_swarm_node_id} - fixed_ips: - - subnet_id: - get_param: fixed_subnet_id - allowed_address_pairs: - - ip_address: {get_param: flannel_network_cidr} - - swarm_node_floating: - type: "OS::Neutron::FloatingIP" - properties: - floating_network: - get_param: external_network - port_id: - get_resource: swarm_node_eth0 - - ###################################################################### - # - # docker storage. This allocates a cinder volume and attaches it - # to the node. - # - - docker_volume: - type: Magnum::Optional::Cinder::Volume - properties: - size: {get_param: docker_volume_size} - volume_type: {get_param: docker_volume_type} - - docker_volume_attach: - type: Magnum::Optional::Cinder::VolumeAttachment - properties: - instance_uuid: {get_resource: swarm-node} - volume_id: {get_resource: docker_volume} - mountpoint: /dev/vdb - -outputs: - - swarm_node_ip: - value: {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]} - description: > - This is the "private" address of the Swarm node. - - swarm_node_external_ip: - value: {get_attr: [swarm_node_floating, floating_ip_address]} - description: > - This is the "public" address of the Swarm node. diff --git a/magnum/drivers/swarm_fedora_atomic_v1/version.py b/magnum/drivers/swarm_fedora_atomic_v1/version.py deleted file mode 100644 index 6bf10862..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v1/version.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 - Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version = '1.0.0' -driver = 'swarm_fedora_atomic_v1' -container_version = '1.12.6' diff --git a/magnum/drivers/swarm_fedora_atomic_v2/__init__.py b/magnum/drivers/swarm_fedora_atomic_v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/drivers/swarm_fedora_atomic_v2/driver.py b/magnum/drivers/swarm_fedora_atomic_v2/driver.py deleted file mode 100644 index b00d028a..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v2/driver.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
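# Editor's note (illustrative sketch, not part of the original module): a - # caller holding `context` and `cluster` objects would exercise this driver - # roughly as follows: - # - # drv = Driver() - # drv.provides # [{'server_type': 'vm', 'os': 'fedora-atomic', 'coe': 'swarm-mode'}] - # definition = drv.get_template_definition() - # monitor = drv.get_monitor(context, cluster)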
- -from magnum.drivers.heat import driver -from magnum.drivers.swarm_fedora_atomic_v2 import monitor -from magnum.drivers.swarm_fedora_atomic_v2 import template_def - - -class Driver(driver.HeatDriver): - - @property - def provides(self): - return [ - {'server_type': 'vm', - 'os': 'fedora-atomic', - 'coe': 'swarm-mode'}, - ] - - def get_template_definition(self): - return template_def.AtomicSwarmTemplateDefinition() - - def get_monitor(self, context, cluster): - return monitor.SwarmMonitor(context, cluster) diff --git a/magnum/drivers/swarm_fedora_atomic_v2/monitor.py b/magnum/drivers/swarm_fedora_atomic_v2/monitor.py deleted file mode 100644 index 378f3506..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v2/monitor.py +++ /dev/null @@ -1,107 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log - -from magnum.common import docker_utils -from magnum.conductor import monitors - -LOG = log.getLogger(__name__) - - -class SwarmMonitor(monitors.MonitorBase): - - def __init__(self, context, cluster): - super(SwarmMonitor, self).__init__(context, cluster) - self.data = {} - self.data['nodes'] = [] - self.data['containers'] = [] - - @property - def metrics_spec(self): - return { - 'memory_util': { - 'unit': '%', - 'func': 'compute_memory_util', - }, - } - - def pull_data(self): - with docker_utils.docker_for_cluster(self.context, - self.cluster) as docker: - system_info = docker.info() - self.data['nodes'] = self._parse_node_info(system_info) - - # pull data from each container - containers = [] - for container in docker.containers(all=True): - try: - container = docker.inspect_container(container['Id']) - except Exception as e: - LOG.warning("Ignore error [%(e)s] when inspecting " - "container %(container_id)s.", - {'e': e, 'container_id': container['Id']}, - exc_info=True) - containers.append(container) - self.data['containers'] = containers - - def compute_memory_util(self): - mem_total = 0 - for node in self.data['nodes']: - mem_total += node['MemTotal'] - mem_reserved = 0 - for container in self.data['containers']: - mem_reserved += container['HostConfig']['Memory'] - - if mem_total == 0: - return 0 - else: - return mem_reserved * 100 / mem_total - - def _parse_node_info(self, system_info): - """Parse system_info to retrieve memory size of each node. - - :param system_info: The output returned by docker.info(). Example: - { - u'Debug': False, - u'NEventsListener': 0, - u'DriverStatus': [ - [u'\x08Strategy', u'spread'], - [u'\x08Filters', u'...'], - [u'\x08Nodes', u'2'], - [u'node1', u'10.0.0.4:2375'], - [u' \u2514 Containers', u'1'], - [u' \u2514 Reserved CPUs', u'0 / 1'], - [u' \u2514 Reserved Memory', u'0 B / 2.052 GiB'], - [u'node2', u'10.0.0.3:2375'], - [u' \u2514 Containers', u'2'], - [u' \u2514 Reserved CPUs', u'0 / 1'], - [u' \u2514 Reserved Memory', u'0 B / 2.052 GiB'] - ], - u'Containers': 3 - } - :return: Memory size of each node. 
Example: - [{'MemTotal': 2203318222.848}, - {'MemTotal': 2203318222.848}] - """ - nodes = [] - for info in system_info['DriverStatus']: - key = info[0] - value = info[1] - if key == u' \u2514 Reserved Memory': - memory = value # Example: '0 B / 2.052 GiB' - memory = memory.split('/')[1].strip() # Example: '2.052 GiB' - memory = memory.split(' ')[0] # Example: '2.052' - memory = float(memory) * 1024 * 1024 * 1024 - nodes.append({'MemTotal': memory}) - return nodes diff --git a/magnum/drivers/swarm_fedora_atomic_v2/template_def.py b/magnum/drivers/swarm_fedora_atomic_v2/template_def.py deleted file mode 100644 index 7dcc858c..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v2/template_def.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2016 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os - -from magnum.drivers.heat import swarm_mode_template_def as sftd - - -class AtomicSwarmTemplateDefinition(sftd.SwarmModeTemplateDefinition): - """Docker swarm template for a Fedora Atomic VM.""" - - @property - def driver_module_path(self): - return __name__[:__name__.rindex('.')] - - @property - def template_path(self): - return os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'templates/swarmcluster.yaml') - - def get_params(self, context, cluster_template, cluster, **kwargs): - ep = kwargs.pop('extra_params', {}) - - ep['number_of_secondary_masters'] = cluster.master_count - 1 - - return super(AtomicSwarmTemplateDefinition, - self).get_params(context, cluster_template, cluster, - extra_params=ep, - **kwargs) diff --git a/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-heat-params-master.yaml b/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-heat-params-master.yaml deleted file mode 100644 index f6f2d5f6..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-heat-params-master.yaml +++ /dev/null @@ -1,28 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -write_files: - - path: /etc/sysconfig/heat-params - owner: "root:root" - permissions: "0600" - content: | - IS_PRIMARY_MASTER="$IS_PRIMARY_MASTER" - WAIT_CURL="$WAIT_CURL" - DOCKER_VOLUME="$DOCKER_VOLUME" - DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE" - DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER" - HTTP_PROXY="$HTTP_PROXY" - HTTPS_PROXY="$HTTPS_PROXY" - NO_PROXY="$NO_PROXY" - PRIMARY_MASTER_IP="$PRIMARY_MASTER_IP" - SWARM_API_IP="$SWARM_API_IP" - SWARM_NODE_IP="$SWARM_NODE_IP" - CLUSTER_UUID="$CLUSTER_UUID" - MAGNUM_URL="$MAGNUM_URL" - TLS_DISABLED="$TLS_DISABLED" - API_IP_ADDRESS="$API_IP_ADDRESS" - TRUSTEE_USER_ID="$TRUSTEE_USER_ID" - TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD" - TRUST_ID="$TRUST_ID" - AUTH_URL="$AUTH_URL" - VOLUME_DRIVER="$VOLUME_DRIVER" - REXRAY_PREEMPT="$REXRAY_PREEMPT" diff --git a/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-swarm-master-service.sh b/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-swarm-master-service.sh deleted file mode 100644 index 2c978b41..00000000 ---
a/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-swarm-master-service.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash - -. /etc/sysconfig/heat-params - -set -x - -if [ "${IS_PRIMARY_MASTER}" = "True" ]; then - cat > /usr/local/bin/magnum-start-swarm-manager << START_SWARM_BIN -#!/bin/bash -xe - -docker swarm init --advertise-addr "${SWARM_NODE_IP}" -if [[ \$? -eq 0 ]]; then - status="SUCCESS" - msg="Swarm init was successful." -else - status="FAILURE" - msg="Failed to init swarm." -fi -sh -c "${WAIT_CURL} --data-binary '{\"status\": \"\$status\", \"reason\": \"\$msg\"}'" -START_SWARM_BIN -else - if [ "${TLS_DISABLED}" = 'False' ]; then - tls="--tlsverify" - tls=$tls" --tlscacert=/etc/docker/ca.crt" - tls=$tls" --tlskey=/etc/docker/server.key" - tls=$tls" --tlscert=/etc/docker/server.crt" - fi - - cat > /usr/local/bin/magnum-start-swarm-manager << START_SWARM_BIN -#!/bin/bash -xe -i=0 -until token=\$(docker $tls -H $PRIMARY_MASTER_IP swarm join-token --quiet manager) -do - ((i++)) - [ \$i -lt 5 ] || break; - sleep 5 -done - -if [[ -z \$token ]] ; then - sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Failed to retrieve swarm join token.\"}'" -fi - -i=0 -until docker swarm join --token \$token $PRIMARY_MASTER_IP:2377 -do - ((i++)) - [ \$i -lt 5 ] || break; - sleep 5 -done -if [[ \$i -ge 5 ]] ; then - sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Manager failed to join swarm.\"}'" -else - sh -c "${WAIT_CURL} --data-binary '{\"status\": \"SUCCESS\", \"reason\": \"Manager joined swarm.\"}'" -fi -START_SWARM_BIN -fi -chmod +x /usr/local/bin/magnum-start-swarm-manager - -cat > /etc/systemd/system/swarm-manager.service << END_SERVICE -[Unit] -Description=Swarm Manager -After=docker.service -Requires=docker.service - -[Service] -Type=oneshot -ExecStart=/usr/local/bin/magnum-start-swarm-manager - -[Install] -WantedBy=multi-user.target -END_SERVICE - -chown root:root /etc/systemd/system/swarm-manager.service -chmod 644 /etc/systemd/system/swarm-manager.service - -systemctl daemon-reload -systemctl start --no-block swarm-manager - diff --git a/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-swarm-worker-service.sh b/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-swarm-worker-service.sh deleted file mode 100644 index 6bc8448c..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v2/templates/fragments/write-swarm-worker-service.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -. 
/etc/sysconfig/heat-params - -set -x - -if [ "${TLS_DISABLED}" = 'False' ]; then - tls="--tlsverify" - tls=$tls" --tlscacert=/etc/docker/ca.crt" - tls=$tls" --tlskey=/etc/docker/server.key" - tls=$tls" --tlscert=/etc/docker/server.crt" -fi -cat > /usr/local/bin/magnum-start-swarm-worker << START_SWARM_BIN -#!/bin/bash -ex - -i=0 -until token=\$(/usr/bin/docker $tls -H $SWARM_API_IP swarm join-token --quiet worker) -do - ((i++)) - [ \$i -lt 5 ] || break; - sleep 5 -done - -if [[ -z \$token ]] ; then - sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Failed to retrieve swarm join token.\"}'" -fi - -i=0 -until docker swarm join --token \$token $SWARM_API_IP:2377 -do - ((i++)) - [ \$i -lt 5 ] || break; - sleep 5 -done -if [[ \$i -ge 5 ]] ; then - sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Node failed to join swarm.\"}'" -else - sh -c "${WAIT_CURL} --data-binary '{\"status\": \"SUCCESS\", \"reason\": \"Node joined swarm.\"}'" -fi -START_SWARM_BIN - -chmod +x /usr/local/bin/magnum-start-swarm-worker - -cat > /etc/systemd/system/swarm-worker.service << END_SERVICE -[Unit] -Description=Swarm Worker -After=docker.service -Requires=docker.service - -[Service] -Type=oneshot -ExecStart=/usr/local/bin/magnum-start-swarm-worker - -[Install] -WantedBy=multi-user.target -END_SERVICE - -chown root:root /etc/systemd/system/swarm-worker.service -chmod 644 /etc/systemd/system/swarm-worker.service - -systemctl daemon-reload -systemctl start --no-block swarm-worker diff --git a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmcluster.yaml b/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmcluster.yaml deleted file mode 100644 index 9ed72768..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmcluster.yaml +++ /dev/null @@ -1,413 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This template will boot a Docker Swarm-Mode cluster. A swarm cluster - is made up of several master nodes, and N worker nodes. Every node in - the cluster, including the master, is running a Docker daemon and - joins the swarm as a manager or as a worker. The managers are - listening on port 2375. By default, the cluster is made up of one - master node and one worker node. 
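# Editor's note (illustrative, not part of the original file): unlike most - # parameters below, number_of_secondary_masters carries no default; the - # driver's AtomicSwarmTemplateDefinition (template_def.py above) supplies it - # as cluster.master_count - 1, so a three-master cluster renders one primary - # master plus: - # - # number_of_secondary_masters: 2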
- -parameters: - - # - # REQUIRED PARAMETERS - # - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - fixed_network: - type: string - description: uuid/name of an existing network to use to provision machines - default: "" - - fixed_subnet: - type: string - description: uuid/name of an existing subnet to use to provision machines - default: "" - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - server_image: - type: string - description: glance image used to boot the server - - # - # OPTIONAL PARAMETERS - # - master_flavor: - type: string - description: flavor to use when booting the swarm master - default: m1.small - - node_flavor: - type: string - description: flavor to use when booting the swarm node - - dns_nameserver: - type: string - description: address of a dns nameserver reachable in your environment - default: 8.8.8.8 - - http_proxy: - type: string - description: http proxy address for docker - default: "" - - https_proxy: - type: string - description: https proxy address for docker - default: "" - - no_proxy: - type: string - description: no proxies for docker - default: "" - - number_of_masters: - type: number - description: how many swarm masters to spawn - default: 1 - - number_of_nodes: - type: number - description: how many swarm nodes to spawn - default: 1 - - number_of_secondary_masters: - type: number - description: how many secondary masters to spawn - - fixed_network_cidr: - type: string - description: network range for fixed ip network - default: "10.0.0.0/24" - - tls_disabled: - type: boolean - description: whether or not to enable TLS - default: False - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - default: 0 - - docker_volume_type: - type: string - description: > - type of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - default: "devicemapper" - constraints: - - allowed_values: ["devicemapper", "overlay"] - - loadbalancing_protocol: - type: string - description: > - The protocol which is used for load balancing. If you want to change - the tls_disabled option to 'True', please change this to "HTTP". - default: TCP - constraints: - - allowed_values: ["TCP", "HTTP"] - - swarm_port: - type: number - description: > - The port which is used by the swarm manager to provide the swarm service.
- default: 2375 - - trustee_domain_id: - type: string - description: domain id of the trustee - default: "" - - trustee_user_id: - type: string - description: user id of the trustee - default: "" - - trustee_username: - type: string - description: username of the trustee - default: "" - - trustee_password: - type: string - description: password of the trustee - default: "" - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - default: "" - hidden: true - - auth_url: - type: string - description: url for keystone - - volume_driver: - type: string - description: volume driver to use for container storage - default: "" - constraints: - - allowed_values: ["","rexray"] - - rexray_preempt: - type: string - description: > - enables any host to take control of a volume irrespective of whether - other hosts are using the volume - default: "false" - - -resources: - - ###################################################################### - # - # network resources. allocate a network and router for our server. - # it would also be possible to take advantage of existing network - # resources (and have the deployer provide network and subnet ids, - # etc, as parameters), but I wanted to minimize the amount of - # configuration necessary to make this go. - - network: - type: ../../common/templates/network.yaml - properties: - existing_network: {get_param: fixed_network} - existing_subnet: {get_param: fixed_subnet} - private_network_cidr: {get_param: fixed_network_cidr} - dns_nameserver: {get_param: dns_nameserver} - external_network: {get_param: external_network} - - api_lb: - type: ../../common/templates/lb.yaml - properties: - fixed_subnet: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - protocol: {get_param: loadbalancing_protocol} - port: {get_param: swarm_port} - - ###################################################################### - # - # security groups. we need to permit network traffic of various - # sorts. - # - - secgroup_swarm_manager: - type: "OS::Neutron::SecurityGroup" - properties: - rules: - - protocol: icmp - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - protocol: tcp - port_range_min: 2375 - port_range_max: 2375 - - protocol: tcp - remote_ip_prefix: {get_param: fixed_network_cidr} - port_range_min: 1 - port_range_max: 65535 - - protocol: udp - port_range_min: 53 - port_range_max: 53 - - secgroup_swarm_node: - type: "OS::Neutron::SecurityGroup" - properties: - rules: - - protocol: icmp - - protocol: tcp - - protocol: udp - - ###################################################################### - # - # resources that expose the IPs of either the swarm master or a given - # LBaaS pool depending on whether LBaaS is enabled for the cluster. - # - - api_address_lb_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_attr: [api_lb, floating_address]} - pool_private_ip: {get_attr: [api_lb, address]} - master_public_ip: {get_attr: [swarm_primary_master, resource.0.swarm_master_external_ip]} - master_private_ip: {get_attr: [swarm_primary_master, resource.0.swarm_master_ip]} - - ###################################################################### - # - # Swarm manager is responsible for the entire cluster and manages the - # resources of multiple Docker hosts at scale. - # It supports high availability by creating a primary manager and multiple - # replica instances.
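The `Magnum::ApiGatewaySwitcher` wiring above hands downstream resources either the load balancer's addresses or the primary master's own addresses, depending on which are populated. A rough Python sketch of that fallback behavior (an illustration of the selection logic, not Magnum's actual resource implementation; the addresses are made up):

```python
def select_api_addresses(pool_public_ip, pool_private_ip,
                         master_public_ip, master_private_ip):
    """Return (public_ip, private_ip) for reaching the cluster API."""
    # Prefer the LBaaS pool addresses; fall back to the master's own.
    public_ip = pool_public_ip or master_public_ip
    private_ip = pool_private_ip or master_private_ip
    return public_ip, private_ip


# With a load balancer the pool addresses win; without one, the empty
# pool values fall through to the primary master's addresses.
assert select_api_addresses("172.24.4.10", "10.0.0.5",
                            "172.24.4.20", "10.0.0.6") == ("172.24.4.10", "10.0.0.5")
assert select_api_addresses("", "",
                            "172.24.4.20", "10.0.0.6") == ("172.24.4.20", "10.0.0.6")
```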
- - swarm_primary_master: - type: "OS::Heat::ResourceGroup" - depends_on: - - network - properties: - count: 1 - resource_def: - type: swarmmaster.yaml - properties: - is_primary_master: True - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - server_flavor: {get_param: master_flavor} - docker_volume_size: {get_param: docker_volume_size} - docker_volume_type: {get_param: docker_volume_type} - docker_storage_driver: {get_param: docker_storage_driver} - fixed_network_id: {get_attr: [network, fixed_network]} - fixed_subnet_id: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - swarm_api_ip: {get_attr: [api_lb, address]} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} - tls_disabled: {get_param: tls_disabled} - secgroup_swarm_master_id: {get_resource: secgroup_swarm_manager} - swarm_port: {get_param: swarm_port} - api_pool_id: {get_attr: [api_lb, pool_id]} - api_ip_address: {get_attr: [api_lb, floating_address]} - trustee_user_id: {get_param: trustee_user_id} - trustee_password: {get_param: trustee_password} - trust_id: {get_param: trust_id} - auth_url: {get_param: auth_url} - volume_driver: {get_param: volume_driver} - rexray_preempt: {get_param: rexray_preempt} - - swarm_secondary_masters: - type: "OS::Heat::ResourceGroup" - depends_on: - - network - - swarm_primary_master - properties: - count: {get_param: number_of_secondary_masters} - resource_def: - type: swarmmaster.yaml - properties: - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - server_flavor: {get_param: master_flavor} - docker_volume_size: {get_param: docker_volume_size} - docker_volume_type: {get_param: docker_volume_type} - docker_storage_driver: {get_param: docker_storage_driver} - fixed_network_id: {get_attr: [network, fixed_network]} - fixed_subnet_id: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - http_proxy: {get_param: http_proxy} - https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - swarm_api_ip: {get_attr: [api_address_lb_switch, private_ip]} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} - tls_disabled: {get_param: tls_disabled} - secgroup_swarm_master_id: {get_resource: secgroup_swarm_manager} - swarm_port: {get_param: swarm_port} - api_pool_id: {get_attr: [api_lb, pool_id]} - api_ip_address: {get_attr: [api_lb, floating_address]} - trustee_user_id: {get_param: trustee_user_id} - trustee_password: {get_param: trustee_password} - trust_id: {get_param: trust_id} - auth_url: {get_param: auth_url} - volume_driver: {get_param: volume_driver} - rexray_preempt: {get_param: rexray_preempt} - - swarm_nodes: - type: "OS::Heat::ResourceGroup" - depends_on: - - network - - swarm_primary_master - properties: - count: {get_param: number_of_nodes} - resource_def: - type: swarmnode.yaml - properties: - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - server_flavor: {get_param: node_flavor} - docker_volume_size: {get_param: docker_volume_size} - docker_volume_type: {get_param: docker_volume_type} - docker_storage_driver: {get_param: docker_storage_driver} - fixed_network_id: {get_attr: [network, fixed_network]} - fixed_subnet_id: {get_attr: [network, fixed_subnet]} - external_network: {get_param: external_network} - http_proxy: {get_param: http_proxy} - 
https_proxy: {get_param: https_proxy} - no_proxy: {get_param: no_proxy} - swarm_api_ip: {get_attr: [api_address_lb_switch, private_ip]} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} - tls_disabled: {get_param: tls_disabled} - secgroup_swarm_node_id: {get_resource: secgroup_swarm_node} - api_ip_address: {get_attr: [api_address_lb_switch, public_ip]} - trustee_domain_id: {get_param: trustee_domain_id} - trustee_user_id: {get_param: trustee_user_id} - trustee_username: {get_param: trustee_username} - trustee_password: {get_param: trustee_password} - trust_id: {get_param: trust_id} - auth_url: {get_param: auth_url} - volume_driver: {get_param: volume_driver} - rexray_preempt: {get_param: rexray_preempt} - -outputs: - - api_address: - value: - str_replace: - template: api_ip_address - params: - api_ip_address: {get_attr: [api_address_lb_switch, public_ip]} - description: > - This is the API endpoint of the Swarm masters. Use this to access - the Swarm API server from outside the cluster. - - swarm_primary_master_private: - value: {get_attr: [swarm_primary_master, swarm_master_ip]} - description: > - This is a list of the "private" addresses of the primary Swarm master. - - swarm_primary_master: - value: {get_attr: [swarm_primary_master, swarm_master_external_ip]} - description: > - This is a list of the "public" ip addresses of the primary Swarm master. - Use these addresses to log into the Swarm master via ssh. - - swarm_secondary_masters: - value: {get_attr: [swarm_secondary_masters, swarm_master_external_ip]} - description: > - This is a list of the "public" ip addresses of all secondary Swarm masters. - Use these addresses to log into the Swarm masters via ssh. - - swarm_nodes_private: - value: {get_attr: [swarm_nodes, swarm_node_ip]} - description: > - This is a list of the "private" addresses of all the Swarm nodes. - - swarm_nodes: - value: {get_attr: [swarm_nodes, swarm_node_external_ip]} - description: > - This is a list of the "public" addresses of all the Swarm nodes. Use - these addresses to, e.g., log into the nodes. diff --git a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmmaster.yaml b/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmmaster.yaml deleted file mode 100644 index ecc72bc5..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmmaster.yaml +++ /dev/null @@ -1,359 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a swarm master node. A swarm master node is - running a Docker daemon and joins the swarm as a manager. The Docker daemon - listens on port 2375. - -parameters: - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - - docker_volume_type: - type: string - description: > - type of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - constraints: - - allowed_values: ["devicemapper", "overlay"] - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - fixed_network_id: - type: string - description: Network from which to allocate fixed addresses.
- - fixed_subnet_id: - type: string - description: Subnet from which to allocate fixed addresses. - - swarm_api_ip: - type: string - description: swarm master's api server ip address - default: "" - - api_ip_address: - type: string - description: swarm master's api server public ip address - default: "" - - server_image: - type: string - description: glance image used to boot the server - - server_flavor: - type: string - description: flavor to use when booting the server - - http_proxy: - type: string - description: http proxy address for docker - - https_proxy: - type: string - description: https proxy address for docker - - no_proxy: - type: string - description: no proxies for docker - - tls_disabled: - type: boolean - description: whether or not to enable TLS - - secgroup_swarm_master_id: - type: string - description: ID of the security group for swarm master. - - swarm_port: - type: number - description: > - The port which is used by the swarm manager to provide the swarm service. - - api_pool_id: - type: string - description: ID of the load balancer pool of swarm master server. - - trustee_user_id: - type: string - description: user id of the trustee - - trustee_password: - type: string - description: password of the trustee - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - hidden: true - - auth_url: - type: string - description: url for keystone - - volume_driver: - type: string - description: volume driver to use for container storage - default: "" - - rexray_preempt: - type: string - description: > - enables any host to take control of a volume irrespective of whether - other hosts are using the volume - default: "false" - - is_primary_master: - type: boolean - description: whether this master is primary or not - default: False - -resources: - - master_wait_handle: - type: "OS::Heat::WaitConditionHandle" - - master_wait_condition: - type: "OS::Heat::WaitCondition" - depends_on: swarm-master - properties: - handle: {get_resource: master_wait_handle} - timeout: 6000 - - ###################################################################### - # - # resource that exposes the IPs of either the Swarm master or the API - # LBaaS pool depending on whether LBaaS is enabled for the cluster. - # - - api_address_switch: - type: Magnum::ApiGatewaySwitcher - properties: - pool_public_ip: {get_param: api_ip_address} - pool_private_ip: {get_param: swarm_api_ip} - master_public_ip: {get_attr: [swarm_master_floating, floating_ip_address]} - master_private_ip: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive.
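The software configs that follow are ultimately concatenated by an `OS::Heat::MultipartMime` resource into a single cloud-init user-data payload. A standard-library sketch of what such an archive looks like (the part contents and subtypes below are illustrative, not the actual fragments):

```python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText


def build_user_data(parts):
    """parts: list of (content, mime_subtype) tuples, one per config."""
    archive = MIMEMultipart()
    for content, subtype in parts:
        # cloud-init dispatches each part by its Content-Type,
        # e.g. text/x-shellscript vs. text/cloud-config.
        archive.attach(MIMEText(content, subtype))
    return archive.as_string()


user_data = build_user_data([
    ("#!/bin/sh\nsetenforce 0\n", "x-shellscript"),        # e.g. configure-selinux
    ("#cloud-config\nwrite_files: []\n", "cloud-config"),  # e.g. write-docker-socket
])
print(user_data[:120])
```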
- # - write_heat_params: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/write-heat-params-master.yaml} - params: - "$IS_PRIMARY_MASTER": {get_param: is_primary_master} - "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} - "$DOCKER_VOLUME": {get_resource: docker_volume} - "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} - "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$PRIMARY_MASTER_IP": {get_param: swarm_api_ip} - "$SWARM_API_IP": {get_attr: [api_address_switch, private_ip]} - "$SWARM_NODE_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - "$CLUSTER_UUID": {get_param: cluster_uuid} - "$MAGNUM_URL": {get_param: magnum_url} - "$TLS_DISABLED": {get_param: tls_disabled} - "$API_IP_ADDRESS": {get_attr: [api_address_switch, public_ip]} - "$TRUSTEE_USER_ID": {get_param: trustee_user_id} - "$TRUSTEE_PASSWORD": {get_param: trustee_password} - "$TRUST_ID": {get_param: trust_id} - "$AUTH_URL": {get_param: auth_url} - "$VOLUME_DRIVER": {get_param: volume_driver} - "$REXRAY_PREEMPT": {get_param: rexray_preempt} - - remove_docker_key: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/remove-docker-key.sh} - - configure_docker_storage: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - params: - $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} - template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} - - make_cert: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/make-cert.py} - - add_docker_daemon_options: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/add-docker-daemon-options.sh} - - write_docker_socket: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/write-docker-socket.yaml} - - write_swarm_master_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: fragments/write-swarm-master-service.sh} - - enable_services: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/swarm/fragments/enable-services.sh} - params: - "$NODE_SERVICES": "docker.socket docker" - - configure_selinux: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/configure-selinux.sh} - - add_proxy: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/add-proxy.sh} - - volume_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/volume-service.sh} - - swarm_master_init: - type: "OS::Heat::MultipartMime" - properties: - parts: - - config: {get_resource: configure_selinux} - - config: {get_resource: remove_docker_key} - - config: {get_resource: write_heat_params} - - config: {get_resource: make_cert} - - config: {get_resource: configure_docker_storage} - - config: {get_resource: 
add_docker_daemon_options} - - config: {get_resource: write_docker_socket} - - config: {get_resource: add_proxy} - - config: {get_resource: enable_services} - - config: {get_resource: write_swarm_master_service} - - config: {get_resource: volume_service} - - ###################################################################### - # - # The Swarm manager is a special node running the swarm manager daemon - # alongside the swarm worker. - # - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - swarm-master: - type: "OS::Nova::Server" - properties: - image: - get_param: server_image - flavor: - get_param: server_flavor - key_name: - get_param: ssh_key_name - user_data_format: RAW - user_data: {get_resource: swarm_master_init} - networks: - - port: - get_resource: swarm_master_eth0 - - swarm_master_eth0: - type: "OS::Neutron::Port" - properties: - network_id: - get_param: fixed_network_id - security_groups: - - {get_param: secgroup_swarm_master_id} - fixed_ips: - - subnet_id: - get_param: fixed_subnet_id - - swarm_master_floating: - type: "OS::Neutron::FloatingIP" - properties: - floating_network: - get_param: external_network - port_id: - get_resource: swarm_master_eth0 - - api_pool_member: - type: Magnum::Optional::Neutron::LBaaS::PoolMember - properties: - pool: {get_param: api_pool_id} - address: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - subnet: { get_param: fixed_subnet_id } - protocol_port: {get_param: swarm_port} - - ###################################################################### - # - # docker storage. This allocates a cinder volume and attaches it - # to the node. - # - - docker_volume: - type: Magnum::Optional::Cinder::Volume - properties: - size: {get_param: docker_volume_size} - volume_type: {get_param: docker_volume_type} - - docker_volume_attach: - type: Magnum::Optional::Cinder::VolumeAttachment - properties: - instance_uuid: {get_resource: swarm-master} - volume_id: {get_resource: docker_volume} - mountpoint: /dev/vdb - -outputs: - - swarm_master_ip: - value: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]} - description: > - This is the "private" address of the Swarm master. - - swarm_master_external_ip: - value: {get_attr: [swarm_master_floating, floating_ip_address]} - description: > - This is the "public" ip address of the Swarm master. diff --git a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmnode.yaml b/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmnode.yaml deleted file mode 100644 index 17fc2854..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmnode.yaml +++ /dev/null @@ -1,322 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - This is a nested stack that defines a single swarm worker node, based on a - vanilla Fedora Atomic image. This stack is included by a ResourceGroup - resource in the parent template (swarmcluster.yaml).
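Both the master and node templates carry the warning above about underscores in Nova server names: Nova derives the instance hostname from the display name, and underscores are not valid in DNS hostnames, so a name like `swarm_master_0` would yield a mismatched hostname. A small hypothetical helper (not part of Magnum) that normalizes a candidate server name:

```python
import re


def hostname_safe(name):
    """Lower-case the name and map characters invalid in hostnames to '-'."""
    name = re.sub(r"[^a-z0-9-]", "-", name.lower())
    return name.strip("-")


assert hostname_safe("swarm_master_0") == "swarm-master-0"
```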
- -parameters: - - server_image: - type: string - description: glance image used to boot the server - - server_flavor: - type: string - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - docker_volume_size: - type: number - description: > - size of a cinder volume to allocate to docker for container/image - storage - - docker_volume_type: - type: string - description: > - type of a cinder volume to allocate to docker for container/image - storage - - docker_storage_driver: - type: string - description: docker storage driver name - constraints: - - allowed_values: ["devicemapper", "overlay"] - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - fixed_network_id: - type: string - description: Network from which to allocate fixed addresses. - - fixed_subnet_id: - type: string - description: Subnet from which to allocate fixed addresses. - - http_proxy: - type: string - description: http proxy address for docker - - https_proxy: - type: string - description: https proxy address for docker - - no_proxy: - type: string - description: no proxies for docker - - swarm_api_ip: - type: string - description: swarm master's api server ip address - - api_ip_address: - type: string - description: swarm master's api server public ip address - - cluster_uuid: - type: string - description: identifier for the cluster this template is generating - - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - - tls_disabled: - type: boolean - description: whether or not to disable TLS - - secgroup_swarm_node_id: - type: string - description: ID of the security group for swarm node. - - trustee_domain_id: - type: string - description: domain id of the trustee - - trustee_user_id: - type: string - description: user id of the trustee - - trustee_username: - type: string - description: username of the trustee - - trustee_password: - type: string - description: password of the trustee - hidden: true - - trust_id: - type: string - description: id of the trust which is used by the trustee - hidden: true - - auth_url: - type: string - description: url for keystone - - volume_driver: - type: string - description: volume driver to use for container storage - default: "" - - rexray_preempt: - type: string - description: > - enables any host to take control of a volume irrespective of whether - other hosts are using the volume - default: "false" - -resources: - - node_wait_handle: - type: "OS::Heat::WaitConditionHandle" - - node_wait_condition: - type: "OS::Heat::WaitCondition" - depends_on: swarm-node - properties: - handle: {get_resource: node_wait_handle} - timeout: 6000 - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive. 
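The `write_heat_params` config that follows relies on Heat's `str_replace` to splice stack values into a shell fragment before it is shipped to the node. A toy Python version of that substitution (Heat performs this server-side; the fragment and parameter values here are illustrative):

```python
def str_replace(template, params):
    """Longest-key-first literal substitution, mimicking Heat's str_replace."""
    # Replacing longer keys first avoids a shorter key clobbering a
    # longer one that shares its prefix.
    for key in sorted(params, key=len, reverse=True):
        template = template.replace(key, str(params[key]))
    return template


fragment = 'WAIT_CURL="$WAIT_CURL"\nSWARM_API_IP="$SWARM_API_IP"\n'
print(str_replace(fragment, {
    "$WAIT_CURL": "curl -i -X POST https://heat.example/signal",
    "$SWARM_API_IP": "10.0.0.5",
}))
```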
- write_heat_params: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/swarm/fragments/write-heat-params-node.yaml} - params: - "$WAIT_CURL": {get_attr: [node_wait_handle, curl_cli]} - "$DOCKER_VOLUME": {get_resource: docker_volume} - "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} - "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} - "$HTTP_PROXY": {get_param: http_proxy} - "$HTTPS_PROXY": {get_param: https_proxy} - "$NO_PROXY": {get_param: no_proxy} - "$SWARM_API_IP": {get_param: swarm_api_ip} - "$SWARM_NODE_IP": {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]} - "$CLUSTER_UUID": {get_param: cluster_uuid} - "$MAGNUM_URL": {get_param: magnum_url} - "$TLS_DISABLED": {get_param: tls_disabled} - "$API_IP_ADDRESS": {get_param: api_ip_address} - "$TRUSTEE_DOMAIN_ID": {get_param: trustee_domain_id} - "$TRUSTEE_USER_ID": {get_param: trustee_user_id} - "$TRUSTEE_USERNAME": {get_param: trustee_username} - "$TRUSTEE_PASSWORD": {get_param: trustee_password} - "$TRUST_ID": {get_param: trust_id} - "$AUTH_URL": {get_param: auth_url} - "$VOLUME_DRIVER": {get_param: volume_driver} - "$REXRAY_PREEMPT": {get_param: rexray_preempt} - - remove_docker_key: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/remove-docker-key.sh} - - make_cert: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/make-cert.py} - - configure_docker_storage: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - params: - $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} - template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh} - - add_docker_daemon_options: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/add-docker-daemon-options.sh} - - write_docker_socket: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/write-docker-socket.yaml} - - write_swarm_worker_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: fragments/write-swarm-worker-service.sh} - - enable_services: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: - str_replace: - template: {get_file: ../../common/templates/swarm/fragments/enable-services.sh} - params: - "$NODE_SERVICES": "docker.socket docker" - - configure_selinux: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/configure-selinux.sh} - - add_proxy: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/add-proxy.sh} - - volume_service: - type: "OS::Heat::SoftwareConfig" - properties: - group: ungrouped - config: {get_file: ../../common/templates/swarm/fragments/volume-service.sh} - - swarm_node_init: - type: "OS::Heat::MultipartMime" - properties: - parts: - - config: {get_resource: configure_selinux} - - config: {get_resource: remove_docker_key} - - config: {get_resource: write_heat_params} - - config: {get_resource: make_cert} - - config: {get_resource: configure_docker_storage} - - config: {get_resource: add_docker_daemon_options} - 
- config: {get_resource: write_docker_socket} - - config: {get_resource: add_proxy} - - config: {get_resource: enable_services} - - config: {get_resource: write_swarm_worker_service} - - config: {get_resource: volume_service} - - # do NOT use "_" (underscore) in the Nova server name - # it creates a mismatch between the generated Nova name and its hostname - # which can lead to weird problems - swarm-node: - type: "OS::Nova::Server" - properties: - image: - get_param: server_image - flavor: - get_param: server_flavor - key_name: - get_param: ssh_key_name - user_data_format: RAW - user_data: {get_resource: swarm_node_init} - networks: - - port: - get_resource: swarm_node_eth0 - - swarm_node_eth0: - type: "OS::Neutron::Port" - properties: - network_id: - get_param: fixed_network_id - security_groups: - - {get_param: secgroup_swarm_node_id} - fixed_ips: - - subnet_id: - get_param: fixed_subnet_id - - swarm_node_floating: - type: "OS::Neutron::FloatingIP" - properties: - floating_network: - get_param: external_network - port_id: - get_resource: swarm_node_eth0 - - ###################################################################### - # - # docker storage. This allocates a cinder volume and attaches it - # to the node. - # - - docker_volume: - type: Magnum::Optional::Cinder::Volume - properties: - size: {get_param: docker_volume_size} - volume_type: {get_param: docker_volume_type} - - docker_volume_attach: - type: Magnum::Optional::Cinder::VolumeAttachment - properties: - instance_uuid: {get_resource: swarm-node} - volume_id: {get_resource: docker_volume} - mountpoint: /dev/vdb - -outputs: - - swarm_node_ip: - value: {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]} - description: > - This is the "private" address of the Swarm node. - - swarm_node_external_ip: - value: {get_attr: [swarm_node_floating, floating_ip_address]} - description: > - This is the "public" address of the Swarm node. diff --git a/magnum/drivers/swarm_fedora_atomic_v2/version.py b/magnum/drivers/swarm_fedora_atomic_v2/version.py deleted file mode 100644 index 1f56aefb..00000000 --- a/magnum/drivers/swarm_fedora_atomic_v2/version.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 - Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version = '2.0.0' -driver = 'swarm_fedora_atomic_v2' -container_version = '1.12.6' diff --git a/magnum/hacking/__init__.py b/magnum/hacking/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/hacking/checks.py b/magnum/hacking/checks.py deleted file mode 100755 index ee264d88..00000000 --- a/magnum/hacking/checks.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (c) 2015 Intel, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -import pep8 # noqa - -""" -Guidelines for writing new hacking checks - - - Use only for Magnum specific tests. OpenStack general tests - should be submitted to the common 'hacking' module. - - Pick numbers in the range M3xx. Find the current test with - the highest allocated number and then pick the next value. - If nova has an N3xx code for that test, use the same number. - - Keep the test method code in the source file ordered based - on the M3xx value. - - List the new rule in the top level HACKING.rst file - - Add test cases for each new rule to magnum/tests/unit/test_hacking.py - -""" -UNDERSCORE_IMPORT_FILES = [] - -mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") -assert_equal_in_end_with_true_or_false_re = re.compile( - r"assertEqual\((\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)") -assert_equal_in_start_with_true_or_false_re = re.compile( - r"assertEqual\((True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)") -assert_equal_with_is_not_none_re = re.compile( - r"assertEqual\(.*?\s+is+\s+not+\s+None\)$") -assert_true_isinstance_re = re.compile( - r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " - "(\w|\.|\'|\"|\[|\])+\)\)") -dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") -assert_xrange_re = re.compile( - r"\s*xrange\s*\(") -log_translation = re.compile( - r"(.)*LOG\.(audit|error|critical)\(\s*('|\")") -log_translation_info = re.compile( - r"(.)*LOG\.(info)\(\s*(_\(|'|\")") -log_translation_exception = re.compile( - r"(.)*LOG\.(exception)\(\s*(_\(|'|\")") -log_translation_LW = re.compile( - r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")") -custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") -underscore_import_check = re.compile(r"(.)*import _(.)*") -translated_log = re.compile( - r"(.)*LOG\.(audit|error|info|critical|exception)" - "\(\s*_\(\s*('|\")") -string_translation = re.compile(r"[^_]*_\(\s*('|\")") - - -def no_mutable_default_args(logical_line): - msg = "M322: Method's default argument shouldn't be mutable!" - if mutable_default_args.match(logical_line): - yield (0, msg) - - -def assert_equal_not_none(logical_line): - """Check for assertEqual(A is not None) sentences M302""" - msg = "M302: assertEqual(A is not None) sentences not allowed." 
- res = assert_equal_with_is_not_none_re.search(logical_line) - if res: - yield (0, msg) - - -def assert_true_isinstance(logical_line): - """Check for assertTrue(isinstance(a, b)) sentences - - M316 - """ - if assert_true_isinstance_re.match(logical_line): - yield (0, "M316: assertTrue(isinstance(a, b)) sentences not allowed") - - -def assert_equal_in(logical_line): - """Check for assertEqual(True|False, A in B), assertEqual(A in B, True|False) - - M338 - """ - res = (assert_equal_in_start_with_true_or_false_re.search(logical_line) or - assert_equal_in_end_with_true_or_false_re.search(logical_line)) - if res: - yield (0, "M338: Use assertIn/NotIn(A, B) rather than " - "assertEqual(A in B, True/False) when checking collection " - "contents.") - - -def no_xrange(logical_line): - """Disallow 'xrange()' - - M339 - """ - if assert_xrange_re.match(logical_line): - yield(0, "M339: Do not use xrange().") - - -def use_timeutils_utcnow(logical_line, filename): - # tools are allowed to use the standard datetime module - if "/tools/" in filename: - return - - msg = "M310: timeutils.utcnow() must be used instead of datetime.%s()" - datetime_funcs = ['now', 'utcnow'] - for f in datetime_funcs: - pos = logical_line.find('datetime.%s' % f) - if pos != -1: - yield (pos, msg % f) - - -def dict_constructor_with_list_copy(logical_line): - msg = ("M336: Must use a dict comprehension instead of a dict constructor" - " with a sequence of key-value pairs." - ) - if dict_constructor_with_list_copy_re.match(logical_line): - yield (0, msg) - - -def no_log_warn(logical_line): - """Disallow 'LOG.warn(' - - LOG.warn() is deprecated; use LOG.warning() instead - https://bugs.launchpad.net/magnum/+bug/1508442 - - M352 - """ - - msg = ("M352: LOG.warn is deprecated, please use LOG.warning!") - if "LOG.warn(" in logical_line: - yield (0, msg) - - -def check_explicit_underscore_import(logical_line, filename): - """Check for explicit import of the _ function - - We need to ensure that any files that are using the _() function - to translate logs are explicitly importing the _ function. We - can't trust unit tests to catch whether the import has been - added, so we need to check for it here. - """ - - # Build a list of the files that have _ imported. No further - # checking needed once it is found. - if filename in UNDERSCORE_IMPORT_FILES: - pass - elif (underscore_import_check.match(logical_line) or - custom_underscore_check.match(logical_line)): - UNDERSCORE_IMPORT_FILES.append(filename) - elif (translated_log.match(logical_line) or - string_translation.match(logical_line)): - yield(0, "M340: Found use of _() without explicit import of _ !") - - -def factory(register): - register(no_mutable_default_args) - register(assert_equal_not_none) - register(assert_true_isinstance) - register(assert_equal_in) - register(use_timeutils_utcnow) - register(dict_constructor_with_list_copy) - register(no_xrange) - register(no_log_warn) - register(check_explicit_underscore_import) diff --git a/magnum/i18n.py b/magnum/i18n.py deleted file mode 100644 index f35c650b..00000000 --- a/magnum/i18n.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. - -See http://docs.openstack.org/developer/oslo.i18n/usage.html . - -""" - -import oslo_i18n - -DOMAIN = 'magnum' - -_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) - -# The primary translation function using the well-known name "_" -_ = _translators.primary - - -def translate(value, user_locale): - return oslo_i18n.translate(value, user_locale) - - -def get_available_languages(): - return oslo_i18n.get_available_languages(DOMAIN) diff --git a/magnum/objects/__init__.py b/magnum/objects/__init__.py deleted file mode 100644 index 6f33ab6a..00000000 --- a/magnum/objects/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.objects import certificate -from magnum.objects import cluster -from magnum.objects import cluster_template -from magnum.objects import magnum_service -from magnum.objects import quota -from magnum.objects import stats -from magnum.objects import x509keypair - - -Cluster = cluster.Cluster -ClusterTemplate = cluster_template.ClusterTemplate -MagnumService = magnum_service.MagnumService -Quota = quota.Quota -X509KeyPair = x509keypair.X509KeyPair -Certificate = certificate.Certificate -Stats = stats.Stats -__all__ = ('Cluster', - 'ClusterTemplate', - 'MagnumService', - 'X509KeyPair', - 'Certificate', - 'Stats', - 'Quota') diff --git a/magnum/objects/base.py b/magnum/objects/base.py deleted file mode 100644 index 995c6d23..00000000 --- a/magnum/objects/base.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Magnum common internal object model""" - -from oslo_versionedobjects import base as ovoo_base -from oslo_versionedobjects import fields as ovoo_fields - - -remotable_classmethod = ovoo_base.remotable_classmethod -remotable = ovoo_base.remotable - - -class MagnumObjectRegistry(ovoo_base.VersionedObjectRegistry): - pass - - -class MagnumObject(ovoo_base.VersionedObject): - """Base class and object factory. - - This forms the base of all objects that can be remoted or instantiated - via RPC.
Simply defining a class that inherits from this base class - will make it remotely instantiatable. Objects should implement the - necessary "get" classmethod routines as well as "save" object methods - as appropriate. - """ - - OBJ_SERIAL_NAMESPACE = 'magnum_object' - OBJ_PROJECT_NAMESPACE = 'magnum' - - def as_dict(self): - return {k: getattr(self, k) - for k in self.fields - if self.obj_attr_is_set(k)} - - -class MagnumObjectDictCompat(ovoo_base.VersionedObjectDictCompat): - pass - - -class MagnumPersistentObject(object): - """Mixin class for Persistent objects. - - This adds the fields that we use in common for all persistent objects. - """ - fields = { - 'created_at': ovoo_fields.DateTimeField(nullable=True), - 'updated_at': ovoo_fields.DateTimeField(nullable=True), - } - - -class MagnumObjectIndirectionAPI(ovoo_base.VersionedObjectIndirectionAPI): - def __init__(self): - super(MagnumObjectIndirectionAPI, self).__init__() - from magnum.conductor import api as conductor_api - self._conductor = conductor_api.API() - - def object_action(self, context, objinst, objmethod, args, kwargs): - return self._conductor.object_action(context, objinst, objmethod, - args, kwargs) - - def object_class_action(self, context, objname, objmethod, objver, - args, kwargs): - return self._conductor.object_class_action(context, objname, objmethod, - objver, args, kwargs) - - def object_backport(self, context, objinst, target_version): - return self._conductor.object_backport(context, objinst, - target_version) - - -class MagnumObjectSerializer(ovoo_base.VersionedObjectSerializer): - # Base class to use for object hydration - OBJ_BASE_CLASS = MagnumObject diff --git a/magnum/objects/certificate.py b/magnum/objects/certificate.py deleted file mode 100644 index 9c4a8dbd..00000000 --- a/magnum/objects/certificate.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
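The base classes above are thin wrappers over oslo.versionedobjects. As a point of reference, a minimal object built directly on that library behaves like the `MagnumObject` subclasses that follow; the class and field names here are illustrative, not a real Magnum object:

```python
from oslo_versionedobjects import base as ovoo_base
from oslo_versionedobjects import fields as ovoo_fields


@ovoo_base.VersionedObjectRegistry.register
class ExampleObject(ovoo_base.VersionedObject):
    OBJ_PROJECT_NAMESPACE = 'magnum'
    VERSION = '1.0'

    fields = {
        'uuid': ovoo_fields.UUIDField(nullable=True),
        'name': ovoo_fields.StringField(nullable=True),
    }


obj = ExampleObject(name='demo')
# Only fields that have been set show up, mirroring MagnumObject.as_dict().
print({k: getattr(obj, k) for k in obj.fields if obj.obj_attr_is_set(k)})
```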
- -from oslo_versionedobjects import fields - -from magnum.objects import base - - -@base.MagnumObjectRegistry.register -class Certificate(base.MagnumPersistentObject, base.MagnumObject): - # Version 1.0: Initial version - # Version 1.1: Rename bay_uuid to cluster_uuid - VERSION = '1.1' - - fields = { - 'project_id': fields.StringField(nullable=True), - 'user_id': fields.StringField(nullable=True), - 'cluster_uuid': fields.StringField(nullable=True), - 'csr': fields.StringField(nullable=True), - 'pem': fields.StringField(nullable=True), - } - - @classmethod - def from_object_cluster(cls, cluster): - return cls(project_id=cluster.project_id, - user_id=cluster.user_id, - cluster_uuid=cluster.uuid) - - @classmethod - def from_db_cluster(cls, cluster): - return cls(project_id=cluster['project_id'], - user_id=cluster['user_id'], - cluster_uuid=cluster['uuid']) diff --git a/magnum/objects/cluster.py b/magnum/objects/cluster.py deleted file mode 100644 index 9c1e9179..00000000 --- a/magnum/objects/cluster.py +++ /dev/null @@ -1,268 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import strutils -from oslo_utils import uuidutils -from oslo_versionedobjects import fields - -from magnum.common import exception -from magnum.db import api as dbapi -from magnum.objects import base -from magnum.objects.cluster_template import ClusterTemplate -from magnum.objects import fields as m_fields - - -@base.MagnumObjectRegistry.register -class Cluster(base.MagnumPersistentObject, base.MagnumObject, - base.MagnumObjectDictCompat): - # Version 1.0: Initial version - # Version 1.1: Added 'bay_create_timeout' field - # Version 1.2: Add 'registry_trust_id' field - # Version 1.3: Added 'baymodel' field - # Version 1.4: Added more types of status to bay's status field - # Version 1.5: Rename 'registry_trust_id' to 'trust_id' - # Add 'trustee_user_name', 'trustee_password', - # 'trustee_user_id' field - # Version 1.6: Add rollback support for Bay - # Version 1.7: Added 'coe_version' and 'container_version' fields - # Version 1.8: Rename 'baymodel' to 'cluster_template' - # Version 1.9: Rename table name from 'bay' to 'cluster' - # Rename 'baymodel_id' to 'cluster_template_id' - # Rename 'bay_create_timeout' to 'create_timeout' - # Version 1.10: Added 'keypair' field - # Version 1.11: Added 'RESUME_FAILED' in status field - # Version 1.12: Added 'get_stats' method - # Version 1.13: Added get_count_all method - # Version 1.14: Added 'docker_volume_size' field - - VERSION = '1.14' - - dbapi = dbapi.get_instance() - - fields = { - 'id': fields.IntegerField(), - 'uuid': fields.UUIDField(nullable=True), - 'name': fields.StringField(nullable=True), - 'project_id': fields.StringField(nullable=True), - 'user_id': fields.StringField(nullable=True), - 'cluster_template_id': fields.StringField(nullable=True), - 'keypair': fields.StringField(nullable=True), - 'docker_volume_size': fields.IntegerField(nullable=True), - 'stack_id': fields.StringField(nullable=True), - 'status': 
m_fields.ClusterStatusField(nullable=True), - 'status_reason': fields.StringField(nullable=True), - 'create_timeout': fields.IntegerField(nullable=True), - 'api_address': fields.StringField(nullable=True), - 'node_addresses': fields.ListOfStringsField(nullable=True), - 'node_count': fields.IntegerField(nullable=True), - 'master_count': fields.IntegerField(nullable=True), - 'discovery_url': fields.StringField(nullable=True), - 'master_addresses': fields.ListOfStringsField(nullable=True), - 'ca_cert_ref': fields.StringField(nullable=True), - 'magnum_cert_ref': fields.StringField(nullable=True), - 'cluster_template': fields.ObjectField('ClusterTemplate'), - 'trust_id': fields.StringField(nullable=True), - 'trustee_username': fields.StringField(nullable=True), - 'trustee_password': fields.StringField(nullable=True), - 'trustee_user_id': fields.StringField(nullable=True), - 'coe_version': fields.StringField(nullable=True), - 'container_version': fields.StringField(nullable=True) - } - - @staticmethod - def _from_db_object(cluster, db_cluster): - """Converts a database entity to a formal object.""" - for field in cluster.fields: - if field != 'cluster_template': - cluster[field] = db_cluster[field] - - # Note(eliqiao): The following line needs to be placed outside the - # loop because there is a dependency from cluster_template to - # cluster_template_id. The cluster_template_id must be populated - # first in the loop before it can be used to find the cluster_template. - cluster['cluster_template'] = ClusterTemplate.get_by_uuid( - cluster._context, cluster.cluster_template_id) - - cluster.obj_reset_changes() - return cluster - - @staticmethod - def _from_db_object_list(db_objects, cls, context): - """Converts a list of database entities to a list of formal objects.""" - return [Cluster._from_db_object(cls(context), obj) - for obj in db_objects] - - @base.remotable_classmethod - def get(cls, context, cluster_id): - """Find a cluster based on its id or uuid and return a Cluster object. - - :param cluster_id: the id *or* uuid of a cluster. - :param context: Security context - :returns: a :class:`Cluster` object. - """ - if strutils.is_int_like(cluster_id): - return cls.get_by_id(context, cluster_id) - elif uuidutils.is_uuid_like(cluster_id): - return cls.get_by_uuid(context, cluster_id) - else: - raise exception.InvalidIdentity(identity=cluster_id) - - @base.remotable_classmethod - def get_by_id(cls, context, cluster_id): - """Find a cluster based on its integer id and return a Cluster object. - - :param cluster_id: the id of a cluster. - :param context: Security context - :returns: a :class:`Cluster` object. - """ - db_cluster = cls.dbapi.get_cluster_by_id(context, cluster_id) - cluster = Cluster._from_db_object(cls(context), db_cluster) - return cluster - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - """Find a cluster based on uuid and return a :class:`Cluster` object. - - :param uuid: the uuid of a cluster. - :param context: Security context - :returns: a :class:`Cluster` object. - """ - db_cluster = cls.dbapi.get_cluster_by_uuid(context, uuid) - cluster = Cluster._from_db_object(cls(context), db_cluster) - return cluster - - @base.remotable_classmethod - def get_count_all(cls, context, filters=None): - """Get count of matching clusters. 
- - :param context: The security context - :param filters: filter dict, can include 'cluster_template_id', - 'name', 'node_count', 'stack_id', 'api_address', - 'node_addresses', 'project_id', 'user_id', - 'status' (should be a status list), 'master_count'. - :returns: Count of matching clusters. - """ - return cls.dbapi.get_cluster_count_all(context, filters=filters) - - @base.remotable_classmethod - def get_by_name(cls, context, name): - """Find a cluster based on name and return a Cluster object. - - :param name: the logical name of a cluster. - :param context: Security context - :returns: a :class:`Cluster` object. - """ - db_cluster = cls.dbapi.get_cluster_by_name(context, name) - cluster = Cluster._from_db_object(cls(context), db_cluster) - return cluster - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, - sort_key=None, sort_dir=None, filters=None): - """Return a list of Cluster objects. - - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param filters: filter dict, can include 'cluster_template_id', - 'name', 'node_count', 'stack_id', 'api_address', - 'node_addresses', 'project_id', 'user_id', - 'status' (should be a status list), 'master_count'. - :returns: a list of :class:`Cluster` objects. - - """ - db_clusters = cls.dbapi.get_cluster_list(context, limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir, - filters=filters) - return Cluster._from_db_object_list(db_clusters, cls, context) - - @base.remotable_classmethod - def get_stats(cls, context, project_id=None): - """Return cluster stats, optionally scoped to a project. - - :param context: Security context. - :param project_id: project id - """ - return cls.dbapi.get_cluster_stats(project_id) - - @base.remotable - def create(self, context=None): - """Create a Cluster record in the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Cluster(context) - - """ - values = self.obj_get_changes() - db_cluster = self.dbapi.create_cluster(values) - self._from_db_object(self, db_cluster) - - @base.remotable - def destroy(self, context=None): - """Delete the Cluster from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Cluster(context) - """ - self.dbapi.destroy_cluster(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self, context=None): - """Save updates to this Cluster. - - Updates will be made column by column based on the result - of self.what_changed(). - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Cluster(context) - """ - updates = self.obj_get_changes() - self.dbapi.update_cluster(self.uuid, updates) - - self.obj_reset_changes() - - @base.remotable - def refresh(self, context=None): - """Loads updates for this Cluster.
- - Loads a Cluster with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded Cluster column by column, if there are any updates. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Cluster(context) - """ - current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) - for field in self.fields: - if self.obj_attr_is_set(field) and self[field] != current[field]: - self[field] = current[field] diff --git a/magnum/objects/cluster_template.py b/magnum/objects/cluster_template.py deleted file mode 100644 index ecaff185..00000000 --- a/magnum/objects/cluster_template.py +++ /dev/null @@ -1,241 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import strutils -from oslo_utils import uuidutils -from oslo_versionedobjects import fields - -from magnum.common import exception -from magnum.db import api as dbapi -from magnum.objects import base -from magnum.objects import fields as m_fields - - -@base.MagnumObjectRegistry.register -class ClusterTemplate(base.MagnumPersistentObject, base.MagnumObject, - base.MagnumObjectDictCompat): - # Version 1.0: Initial version - # Version 1.1: Add 'registry_enabled' field - # Version 1.2: Added 'network_driver' field - # Version 1.3: Added 'labels' attribute - # Version 1.4: Added 'insecure' attribute - # Version 1.5: Changed type of 'coe' from StringField to BayTypeField - # Version 1.6: Change 'insecure' to 'tls_disabled' - # Version 1.7: Added 'public' field - # Version 1.8: Added 'server_type' field - # Version 1.9: Added 'volume_driver' field - # Version 1.10: Removed 'ssh_authorized_key' field - # Version 1.11: Added 'insecure_registry' field - # Version 1.12: Added 'docker_storage_driver' field - # Version 1.13: Added 'master_lb_enabled' field - # Version 1.14: Added 'fixed_subnet' field - # Version 1.15: Added 'floating_ip_enabled' field - # Version 1.16: Renamed the class from 'BayModel' to 'ClusterTemplate' - # Version 1.17: 'coe' field type changed to ClusterTypeField - VERSION = '1.17' - - dbapi = dbapi.get_instance() - - fields = { - 'id': fields.IntegerField(), - 'uuid': fields.StringField(nullable=True), - 'project_id': fields.StringField(nullable=True), - 'user_id': fields.StringField(nullable=True), - 'name': fields.StringField(nullable=True), - 'image_id': fields.StringField(nullable=True), - 'flavor_id': fields.StringField(nullable=True), - 'master_flavor_id': fields.StringField(nullable=True), - 'keypair_id': fields.StringField(nullable=True), - 'dns_nameserver': fields.StringField(nullable=True), - 'external_network_id': fields.StringField(nullable=True), - 'fixed_network': fields.StringField(nullable=True), - 'fixed_subnet': fields.StringField(nullable=True), - 'network_driver': fields.StringField(nullable=True), - 'volume_driver':
fields.StringField(nullable=True), - 'apiserver_port': fields.IntegerField(nullable=True), - 'docker_volume_size': fields.IntegerField(nullable=True), - 'docker_storage_driver': m_fields.DockerStorageDriverField( - nullable=True), - 'cluster_distro': fields.StringField(nullable=True), - 'coe': m_fields.ClusterTypeField(nullable=True), - 'http_proxy': fields.StringField(nullable=True), - 'https_proxy': fields.StringField(nullable=True), - 'no_proxy': fields.StringField(nullable=True), - 'registry_enabled': fields.BooleanField(default=False), - 'labels': fields.DictOfStringsField(nullable=True), - 'tls_disabled': fields.BooleanField(default=False), - 'public': fields.BooleanField(default=False), - 'server_type': fields.StringField(nullable=True), - 'insecure_registry': fields.StringField(nullable=True), - 'master_lb_enabled': fields.BooleanField(default=False), - 'floating_ip_enabled': fields.BooleanField(default=True), - } - - @staticmethod - def _from_db_object(cluster_template, db_cluster_template): - """Converts a database entity to a formal object.""" - for field in cluster_template.fields: - cluster_template[field] = db_cluster_template[field] - - cluster_template.obj_reset_changes() - return cluster_template - - @staticmethod - def _from_db_object_list(db_objects, cls, context): - """Converts a list of database entities to a list of formal objects.""" - return [ClusterTemplate._from_db_object(cls(context), obj) for obj in - db_objects] - - @base.remotable_classmethod - def get(cls, context, cluster_template_id): - """Find and return ClusterTemplate object based on its id or uuid. - - :param cluster_template_id: the id *or* uuid of a ClusterTemplate. - :param context: Security context - :returns: a :class:`ClusterTemplate` object. - """ - if strutils.is_int_like(cluster_template_id): - return cls.get_by_id(context, cluster_template_id) - elif uuidutils.is_uuid_like(cluster_template_id): - return cls.get_by_uuid(context, cluster_template_id) - else: - raise exception.InvalidIdentity(identity=cluster_template_id) - - @base.remotable_classmethod - def get_by_id(cls, context, cluster_template_id): - """Find and return ClusterTemplate object based on its integer id. - - :param cluster_template_id: the id of a ClusterTemplate. - :param context: Security context - :returns: a :class:`ClusterTemplate` object. - """ - db_cluster_template = cls.dbapi.get_cluster_template_by_id( - context, cluster_template_id) - cluster_template = ClusterTemplate._from_db_object(cls(context), - db_cluster_template) - return cluster_template - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - """Find and return ClusterTemplate object based on uuid. - - :param uuid: the uuid of a ClusterTemplate. - :param context: Security context - :returns: a :class:`ClusterTemplate` object. - """ - db_cluster_template = cls.dbapi.get_cluster_template_by_uuid( - context, uuid) - cluster_template = ClusterTemplate._from_db_object(cls(context), - db_cluster_template) - return cluster_template - - @base.remotable_classmethod - def get_by_name(cls, context, name): - """Find and return ClusterTemplate object based on name. - - :param name: the name of a ClusterTemplate. - :param context: Security context - :returns: a :class:`ClusterTemplate` object. 
- """ - db_cluster_template = cls.dbapi.get_cluster_template_by_name(context, - name) - cluster_template = ClusterTemplate._from_db_object(cls(context), - db_cluster_template) - return cluster_template - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, - sort_key=None, sort_dir=None): - """Return a list of ClusterTemplate objects. - - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :returns: a list of :class:`ClusterTemplate` object. - - """ - db_cluster_templates = cls.dbapi.get_cluster_template_list( - context, limit=limit, marker=marker, sort_key=sort_key, - sort_dir=sort_dir) - return ClusterTemplate._from_db_object_list(db_cluster_templates, - cls, context) - - @base.remotable - def create(self, context=None): - """Create a ClusterTemplate record in the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ClusterTemplate(context) - - """ - values = self.obj_get_changes() - db_cluster_template = self.dbapi.create_cluster_template(values) - self._from_db_object(self, db_cluster_template) - - @base.remotable - def destroy(self, context=None): - """Delete the ClusterTemplate from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ClusterTemplate(context) - """ - self.dbapi.destroy_cluster_template(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self, context=None): - """Save updates to this ClusterTemplate. - - Updates will be made column by column based on the result - of self.what_changed(). - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ClusterTemplate(context) - """ - updates = self.obj_get_changes() - self.dbapi.update_cluster_template(self.uuid, updates) - - self.obj_reset_changes() - - @base.remotable - def refresh(self, context=None): - """Loads updates for this ClusterTemplate. - - Loads a ClusterTemplate with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded ClusterTemplate column by column, if there are any updates. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ClusterTemplate(context) - """ - current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) - for field in self.fields: - if self.obj_attr_is_set(field) and self[field] != current[field]: - self[field] = current[field] diff --git a/magnum/objects/fields.py b/magnum/objects/fields.py deleted file mode 100644 index 559a4e83..00000000 --- a/magnum/objects/fields.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2015 Intel Corp. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_versionedobjects import fields
-
-
-class ClusterStatus(fields.Enum):
-    CREATE_IN_PROGRESS = 'CREATE_IN_PROGRESS'
-    CREATE_FAILED = 'CREATE_FAILED'
-    CREATE_COMPLETE = 'CREATE_COMPLETE'
-    UPDATE_IN_PROGRESS = 'UPDATE_IN_PROGRESS'
-    UPDATE_FAILED = 'UPDATE_FAILED'
-    UPDATE_COMPLETE = 'UPDATE_COMPLETE'
-    DELETE_IN_PROGRESS = 'DELETE_IN_PROGRESS'
-    DELETE_FAILED = 'DELETE_FAILED'
-    DELETE_COMPLETE = 'DELETE_COMPLETE'
-    RESUME_COMPLETE = 'RESUME_COMPLETE'
-    RESUME_FAILED = 'RESUME_FAILED'
-    RESTORE_COMPLETE = 'RESTORE_COMPLETE'
-    ROLLBACK_IN_PROGRESS = 'ROLLBACK_IN_PROGRESS'
-    ROLLBACK_FAILED = 'ROLLBACK_FAILED'
-    ROLLBACK_COMPLETE = 'ROLLBACK_COMPLETE'
-    SNAPSHOT_COMPLETE = 'SNAPSHOT_COMPLETE'
-    CHECK_COMPLETE = 'CHECK_COMPLETE'
-    ADOPT_COMPLETE = 'ADOPT_COMPLETE'
-
-    ALL = (CREATE_IN_PROGRESS, CREATE_FAILED, CREATE_COMPLETE,
-           UPDATE_IN_PROGRESS, UPDATE_FAILED, UPDATE_COMPLETE,
-           DELETE_IN_PROGRESS, DELETE_FAILED, DELETE_COMPLETE,
-           RESUME_COMPLETE, RESUME_FAILED, RESTORE_COMPLETE,
-           ROLLBACK_IN_PROGRESS, ROLLBACK_FAILED, ROLLBACK_COMPLETE,
-           SNAPSHOT_COMPLETE, CHECK_COMPLETE, ADOPT_COMPLETE)
-
-    STATUS_FAILED = (CREATE_FAILED, UPDATE_FAILED,
-                     DELETE_FAILED, ROLLBACK_FAILED, RESUME_FAILED)
-
-    def __init__(self):
-        super(ClusterStatus, self).__init__(valid_values=ClusterStatus.ALL)
-
-
-class ContainerStatus(fields.Enum):
-    ALL = (
-        ERROR, RUNNING, STOPPED, PAUSED, UNKNOWN,
-    ) = (
-        'Error', 'Running', 'Stopped', 'Paused', 'Unknown',
-    )
-
-    def __init__(self):
-        super(ContainerStatus, self).__init__(
-            valid_values=ContainerStatus.ALL)
-
-
-class ClusterType(fields.Enum):
-    ALL = (
-        KUBERNETES, SWARM, MESOS, DCOS, SWARM_MODE,
-    ) = (
-        'kubernetes', 'swarm', 'mesos', 'dcos', 'swarm-mode',
-    )
-
-    def __init__(self):
-        super(ClusterType, self).__init__(valid_values=ClusterType.ALL)
-
-
-class DockerStorageDriver(fields.Enum):
-    ALL = (
-        DEVICEMAPPER, OVERLAY,
-    ) = (
-        'devicemapper', 'overlay',
-    )
-
-    def __init__(self):
-        super(DockerStorageDriver, self).__init__(
-            valid_values=DockerStorageDriver.ALL)
-
-
-class QuotaResourceName(fields.Enum):
-    ALL = (
-        CLUSTER,
-    ) = (
-        'Cluster',
-    )
-
-    def __init__(self):
-        super(QuotaResourceName, self).__init__(
-            valid_values=QuotaResourceName.ALL)
-
-
-class ServerType(fields.Enum):
-    ALL = (
-        VM, BM,
-    ) = (
-        'vm', 'bm',
-    )
-
-    def __init__(self):
-        super(ServerType, self).__init__(
-            valid_values=ServerType.ALL)
-
-
-class MagnumServiceState(fields.Enum):
-    ALL = (
-        up, down
-    ) = (
-        'up', 'down',
-    )
-
-    def __init__(self):
-        super(MagnumServiceState, self).__init__(
-            valid_values=MagnumServiceState.ALL)
-
-
-class MagnumServiceBinary(fields.Enum):
-    # NOTE: the trailing comma is required; without it the chained
-    # assignment binds the whole tuple, not the string, to
-    # magnum_conductor.
-    ALL = (
-        magnum_conductor,
-    ) = (
-        'magnum-conductor',
-    )
-
-    def __init__(self):
-        super(MagnumServiceBinary, self).__init__(
-            valid_values=MagnumServiceBinary.ALL)
-
-
-class ListOfDictsField(fields.AutoTypedField):
-    AUTO_TYPE = fields.List(fields.Dict(fields.FieldType()))
-
-
-class
ClusterStatusField(fields.BaseEnumField): - AUTO_TYPE = ClusterStatus() - - -class MagnumServiceField(fields.BaseEnumField): - AUTO_TYPE = MagnumServiceState() - - -class MagnumServiceBinaryField(fields.BaseEnumField): - AUTO_TYPE = MagnumServiceBinary() - - -class ContainerStatusField(fields.BaseEnumField): - AUTO_TYPE = ContainerStatus() - - -class ClusterTypeField(fields.BaseEnumField): - AUTO_TYPE = ClusterType() - - -class DockerStorageDriverField(fields.BaseEnumField): - AUTO_TYPE = DockerStorageDriver() - - -class ServerTypeField(fields.BaseEnumField): - AUTO_TYPE = ServerType() diff --git a/magnum/objects/magnum_service.py b/magnum/objects/magnum_service.py deleted file mode 100644 index bcc29eba..00000000 --- a/magnum/objects/magnum_service.py +++ /dev/null @@ -1,147 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from magnum.db import api as dbapi -from magnum.objects import base - - -@base.MagnumObjectRegistry.register -class MagnumService(base.MagnumPersistentObject, base.MagnumObject): - # Version 1.0: Initial version - VERSION = '1.0' - - dbapi = dbapi.get_instance() - - fields = { - 'id': fields.IntegerField(), - 'host': fields.StringField(nullable=True), - 'binary': fields.StringField(nullable=True), - 'disabled': fields.BooleanField(), - 'disabled_reason': fields.StringField(nullable=True), - 'last_seen_up': fields.DateTimeField(nullable=True), - 'forced_down': fields.BooleanField(), - 'report_count': fields.IntegerField(), - } - - @staticmethod - def _from_db_object(magnum_service, db_magnum_service): - """Converts a database entity to a formal object.""" - for field in magnum_service.fields: - setattr(magnum_service, field, db_magnum_service[field]) - - magnum_service.obj_reset_changes() - return magnum_service - - @staticmethod - def _from_db_object_list(db_objects, cls, context): - """Converts a list of database entities to a list of formal objects.""" - return [MagnumService._from_db_object(cls(context), obj) - for obj in db_objects] - - @base.remotable_classmethod - def get_by_host_and_binary(cls, context, host, binary): - """Find a magnum_service based on its hostname and binary. - - :param host: The host on which the binary is running. - :param binary: The name of the binary. - :param context: Security context. - :returns: a :class:`MagnumService` object. - """ - db_magnum_service = cls.dbapi.get_magnum_service_by_host_and_binary( - host, binary) - if db_magnum_service is None: - return None - magnum_service = MagnumService._from_db_object( - cls(context), db_magnum_service) - return magnum_service - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, - sort_key=None, sort_dir=None): - """Return a list of MagnumService objects. - - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. 
"asc" or "desc". - :returns: a list of :class:`MagnumService` object. - - """ - db_magnum_services = cls.dbapi.get_magnum_service_list( - limit=limit, marker=marker, sort_key=sort_key, - sort_dir=sort_dir) - return MagnumService._from_db_object_list(db_magnum_services, cls, - context) - - @base.remotable - def create(self, context=None): - """Create a MagnumService record in the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: MagnumService(context) - """ - values = self.obj_get_changes() - db_magnum_service = self.dbapi.create_magnum_service(values) - self._from_db_object(self, db_magnum_service) - - @base.remotable - def destroy(self, context=None): - """Delete the MagnumService from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: MagnumService(context) - """ - self.dbapi.destroy_magnum_service(self.id) - self.obj_reset_changes() - - @base.remotable - def save(self, context=None): - """Save updates to this MagnumService. - - Updates will be made column by column based on the result - of self.what_changed(). - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: MagnumService(context) - """ - updates = self.obj_get_changes() - self.dbapi.update_magnum_service(self.id, updates) - self.obj_reset_changes() - - @base.remotable - def report_state_up(self, context=None): - """Touching the magnum_service record to show aliveness. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: MagnumService(context) - """ - self.report_count += 1 - self.save() diff --git a/magnum/objects/quota.py b/magnum/objects/quota.py deleted file mode 100644 index 287b17e5..00000000 --- a/magnum/objects/quota.py +++ /dev/null @@ -1,142 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-from oslo_versionedobjects import fields
-
-from magnum.db import api as dbapi
-from magnum.objects import base
-
-
-@base.MagnumObjectRegistry.register
-class Quota(base.MagnumPersistentObject, base.MagnumObject,
-            base.MagnumObjectDictCompat):
-    # Version 1.0: Initial version
-    VERSION = '1.0'
-
-    dbapi = dbapi.get_instance()
-
-    fields = {
-        'id': fields.IntegerField(),
-        'project_id': fields.StringField(nullable=False),
-        'resource': fields.StringField(nullable=False),
-        'hard_limit': fields.IntegerField(nullable=False),
-    }
-
-    @base.remotable_classmethod
-    def get_quota_by_project_id_resource(cls, context, project_id, resource):
-        """Find a quota by project id and resource and return a Quota object.
-
-        :param project_id: the id of a project.
-        :param resource: resource name.
-        :param context: Security context
-        :returns: a :class:`Quota` object.
-        """
-        db_quota = cls.dbapi.get_quota_by_project_id_resource(project_id,
-                                                              resource)
-        quota = Quota._from_db_object(cls(context), db_quota)
-        return quota
-
-    @staticmethod
-    def _from_db_object(quota, db_quota):
-        """Converts a database entity to a formal object."""
-        for field in quota.fields:
-            setattr(quota, field, db_quota[field])
-
-        quota.obj_reset_changes()
-        return quota
-
-    @staticmethod
-    def _from_db_object_list(db_objects, cls, context):
-        """Converts a list of database entities to a list of formal objects."""
-        return [Quota._from_db_object(cls(context), obj)
-                for obj in db_objects]
-
-    @base.remotable_classmethod
-    def get_by_id(cls, context, quota_id):
-        """Find a quota based on its integer id and return a Quota object.
-
-        :param quota_id: the id of a quota.
-        :param context: Security context
-        :returns: a :class:`Quota` object.
-        """
-        db_quota = cls.dbapi.get_quota_by_id(context, quota_id)
-        quota = Quota._from_db_object(cls(context), db_quota)
-        return quota
-
-    @base.remotable_classmethod
-    def list(cls, context, limit=None, marker=None,
-             sort_key=None, sort_dir=None, filters=None):
-        """Return a list of Quota objects.
-
-        :param context: Security context.
-        :param limit: maximum number of resources to return in a single result.
-        :param marker: pagination marker for large data sets.
-        :param sort_key: column to sort results by.
-        :param sort_dir: direction to sort. "asc" or "desc".
-        :param filters: filter dict, can include 'project_id' and
-                        'resource'.
-        :returns: a list of :class:`Quota` objects.
-
-        """
-        db_quotas = cls.dbapi.get_quota_list(context,
-                                             limit=limit,
-                                             marker=marker,
-                                             sort_key=sort_key,
-                                             sort_dir=sort_dir,
-                                             filters=filters)
-        return Quota._from_db_object_list(db_quotas, cls, context)
-
-    @base.remotable_classmethod
-    def quota_get_all_by_project_id(cls, context, project_id):
-        """Find all quotas for the given project id.
-
-        :param project_id: the project id.
-        :param context: Security context
-        :returns: a list of :class:`Quota` objects.
-        """
-        quotas = cls.dbapi.get_quota_by_project_id(context, project_id)
-        return Quota._from_db_object_list(quotas, cls, context)
-
-    @base.remotable
-    def create(self, context=None):
-        """Create a Quota record in the DB.
-
-        :param context: security context.
-        """
-        values = self.obj_get_changes()
-        db_quota = self.dbapi.create_quota(values)
-        self._from_db_object(self, db_quota)
-
-    @base.remotable
-    def delete(self, context=None):
-        """Delete the quota from the DB.
-
-        :param context: Security context. NOTE: This should only
-                        be used internally by the indirection_api.
-                        Unfortunately, RPC requires context as the first
-                        argument, even though we don't use it.
-                        A context should be set when instantiating the
-                        object, e.g.: Quota(context)
-        """
-        self.dbapi.delete_quota(self.project_id, self.resource)
-        self.obj_reset_changes()
-
-    @base.remotable_classmethod
-    def update_quota(cls, context, project_id, quota):
-        """Update an existing quota record for the given project id.
-
-        :param quota: the new quota values.
-        :returns: the updated :class:`Quota` object.
-        """
-        db_quota = cls.dbapi.update_quota(project_id, quota)
-        return Quota._from_db_object(cls(context), db_quota)
diff --git a/magnum/objects/stats.py b/magnum/objects/stats.py
deleted file mode 100644
index 0d800c8f..00000000
--- a/magnum/objects/stats.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# coding=utf-8
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_versionedobjects import fields
-
-from magnum.db import api as dbapi
-from magnum.objects import base
-
-
-@base.MagnumObjectRegistry.register
-class Stats(base.MagnumObject, base.MagnumObjectDictCompat):
-    # Version 1.0: Initial version
-
-    VERSION = '1.0'
-
-    dbapi = dbapi.get_instance()
-
-    fields = {
-        'clusters': fields.IntegerField(),
-        'nodes': fields.IntegerField(nullable=True)
-    }
-
-    @base.remotable_classmethod
-    def get_cluster_stats(cls, context, project_id=None):
-        """Return cluster stats for the given project.
-
-        :param context: The security context
-        :param project_id: project id
-        """
-        clusters, nodes = cls.dbapi.get_cluster_stats(context, project_id)
-        return cls(clusters=clusters, nodes=nodes)
diff --git a/magnum/objects/x509keypair.py b/magnum/objects/x509keypair.py
deleted file mode 100644
index f985276b..00000000
--- a/magnum/objects/x509keypair.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
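A short, hypothetical usage sketch for the Quota object above; the project id and limit are invented for illustration, and 'Cluster' matches the QuotaResourceName enum in magnum/objects/fields.py.

from magnum.common import context as magnum_context
from magnum import objects


def cap_clusters(project_id, limit=5):
    ctx = magnum_context.make_admin_context()
    # Persist a hard limit on the number of clusters for this project.
    quota = objects.Quota(ctx, project_id=project_id,
                          resource='Cluster', hard_limit=limit)
    quota.create()
    # Read it back by (project_id, resource), e.g. at enforcement time.
    return objects.Quota.get_quota_by_project_id_resource(
        ctx, project_id, 'Cluster')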
- -from oslo_utils import strutils -from oslo_utils import uuidutils -from oslo_versionedobjects import fields - -from magnum.common import exception -from magnum.db import api as dbapi -from magnum.objects import base - - -@base.MagnumObjectRegistry.register -class X509KeyPair(base.MagnumPersistentObject, base.MagnumObject): - # Version 1.0: Initial version - # Version 1.1: Added new method get_x509keypair_by_bay_uuid - # Version 1.2: Remove bay_uuid, name, ca_cert and add intermediates - # and private_key_passphrase - VERSION = '1.2' - - dbapi = dbapi.get_instance() - - fields = { - 'id': fields.IntegerField(), - 'uuid': fields.UUIDField(nullable=True), - 'certificate': fields.StringField(nullable=True), - 'private_key': fields.StringField(nullable=True), - 'intermediates': fields.StringField(nullable=True), - 'private_key_passphrase': fields.StringField(nullable=True), - 'project_id': fields.StringField(nullable=True), - 'user_id': fields.StringField(nullable=True), - } - - @staticmethod - def _from_db_object(x509keypair, db_x509keypair): - """Converts a database entity to a formal object.""" - for field in x509keypair.fields: - setattr(x509keypair, field, db_x509keypair[field]) - - x509keypair.obj_reset_changes() - return x509keypair - - @staticmethod - def _from_db_object_list(db_objects, cls, context): - """Converts a list of database entities to a list of formal objects.""" - return [X509KeyPair._from_db_object(cls(context), obj) - for obj in db_objects] - - @base.remotable_classmethod - def get(cls, context, x509keypair_id): - """Find a X509KeyPair based on its id or uuid. - - Find X509KeyPair by id or uuid and return a X509KeyPair object. - - :param x509keypair_id: the id *or* uuid of a x509keypair. - :param context: Security context - :returns: a :class:`X509KeyPair` object. - """ - if strutils.is_int_like(x509keypair_id): - return cls.get_by_id(context, x509keypair_id) - elif uuidutils.is_uuid_like(x509keypair_id): - return cls.get_by_uuid(context, x509keypair_id) - else: - raise exception.InvalidIdentity(identity=x509keypair_id) - - @base.remotable_classmethod - def get_by_id(cls, context, x509keypair_id): - """Find a X509KeyPair based on its integer id. - - Find X509KeyPair by id and return a X509KeyPair object. - - :param x509keypair_id: the id of a x509keypair. - :param context: Security context - :returns: a :class:`X509KeyPair` object. - """ - db_x509keypair = cls.dbapi.get_x509keypair_by_id(context, - x509keypair_id) - x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair) - return x509keypair - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - """Find a x509keypair based on uuid and return a :class:`X509KeyPair` object. - - :param uuid: the uuid of a x509keypair. - :param context: Security context - :returns: a :class:`X509KeyPair` object. - """ - db_x509keypair = cls.dbapi.get_x509keypair_by_uuid(context, uuid) - x509keypair = X509KeyPair._from_db_object(cls(context), db_x509keypair) - return x509keypair - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, - sort_key=None, sort_dir=None, filters=None): - """Return a list of X509KeyPair objects. - - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param filters: filter dict, can include 'x509keypairmodel_id', - 'project_id', 'user_id'. 
- :returns: a list of :class:`X509KeyPair` object. - - """ - db_x509keypairs = cls.dbapi.get_x509keypair_list(context, limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir, - filters=filters) - return X509KeyPair._from_db_object_list(db_x509keypairs, cls, context) - - @base.remotable - def create(self, context=None): - """Create a X509KeyPair record in the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: X509KeyPair(context) - - """ - values = self.obj_get_changes() - db_x509keypair = self.dbapi.create_x509keypair(values) - self._from_db_object(self, db_x509keypair) - - @base.remotable - def destroy(self, context=None): - """Delete the X509KeyPair from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: X509KeyPair(context) - """ - self.dbapi.destroy_x509keypair(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self, context=None): - """Save updates to this X509KeyPair. - - Updates will be made column by column based on the result - of self.what_changed(). - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: X509KeyPair(context) - """ - updates = self.obj_get_changes() - self.dbapi.update_x509keypair(self.uuid, updates) - - self.obj_reset_changes() - - @base.remotable - def refresh(self, context=None): - """Loads updates for this X509KeyPair. - - Loads a x509keypair with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded x509keypair column by column, if there are any updates. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: X509KeyPair(context) - """ - current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) - for field in self.fields: - if self.obj_attr_is_set(field) and \ - getattr(self, field) != getattr(current, field): - setattr(self, field, getattr(current, field)) diff --git a/magnum/service/__init__.py b/magnum/service/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/service/periodic.py b/magnum/service/periodic.py deleted file mode 100755 index 7cbd286a..00000000 --- a/magnum/service/periodic.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright (c) 2015 Intel Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-
-from oslo_log import log
-from oslo_service import loopingcall
-from oslo_service import periodic_task
-
-from pycadf import cadftaxonomy as taxonomy
-
-from magnum.common import context
-from magnum.common import profiler
-from magnum.common import rpc
-from magnum.conductor import monitors
-from magnum.conductor import utils as conductor_utils
-import magnum.conf
-from magnum.drivers.common import driver
-from magnum import objects
-
-
-CONF = magnum.conf.CONF
-LOG = log.getLogger(__name__)
-
-
-def set_context(func):
-    @functools.wraps(func)
-    def handler(self, ctx):
-        ctx = context.make_admin_context(all_tenants=True)
-        context.set_ctx(ctx)
-        func(self, ctx)
-        context.set_ctx(None)
-    return handler
-
-
-class ClusterUpdateJob(object):
-
-    status_to_event = {
-        objects.fields.ClusterStatus.DELETE_COMPLETE: taxonomy.ACTION_DELETE,
-        objects.fields.ClusterStatus.CREATE_COMPLETE: taxonomy.ACTION_CREATE,
-        objects.fields.ClusterStatus.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE,
-        objects.fields.ClusterStatus.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE,
-        objects.fields.ClusterStatus.CREATE_FAILED: taxonomy.ACTION_CREATE,
-        objects.fields.ClusterStatus.DELETE_FAILED: taxonomy.ACTION_DELETE,
-        objects.fields.ClusterStatus.UPDATE_FAILED: taxonomy.ACTION_UPDATE,
-        objects.fields.ClusterStatus.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE
-    }
-
-    def __init__(self, ctx, cluster):
-        self.ctx = ctx
-        self.cluster = cluster
-
-    def update_status(self):
-        LOG.debug("Updating status for cluster %s", self.cluster.id)
-        # get the driver for the cluster
-        cdriver = driver.Driver.get_driver_for_cluster(self.ctx, self.cluster)
-        # ask the driver to sync status
-        cdriver.update_cluster_status(self.ctx, self.cluster)
-        LOG.debug("Status for cluster %s updated to %s (%s)",
-                  self.cluster.id, self.cluster.status,
-                  self.cluster.status_reason)
-        # status update notifications
-        if self.cluster.status.endswith("_COMPLETE"):
-            conductor_utils.notify_about_cluster_operation(
-                self.ctx, self.status_to_event[self.cluster.status],
-                taxonomy.OUTCOME_SUCCESS)
-        if self.cluster.status.endswith("_FAILED"):
-            conductor_utils.notify_about_cluster_operation(
-                self.ctx, self.status_to_event[self.cluster.status],
-                taxonomy.OUTCOME_FAILURE)
-        # if we're done with it, delete it
-        if self.cluster.status == objects.fields.ClusterStatus.DELETE_COMPLETE:
-            self.cluster.destroy()
-        # end the "loop"
-        raise loopingcall.LoopingCallDone()
-
-
-@profiler.trace_cls("rpc")
-class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
-    '''Magnum periodic task class.
-
-    Any periodic task job needs to be added to this class.
-
-    NOTE(suro-patz):
-    - oslo_service.periodic_task runs tasks protected within a try/catch
-      block, with default raise_on_error as 'False', in run_periodic_tasks(),
-      which ensures the process does not die, even if a task encounters an
-      Exception.
-    - The periodic tasks here do not necessarily need another
-      try/catch block; the ones present here add
-      magnum-periodic-task-specific log/error messages.
- - ''' - - def __init__(self, conf): - super(MagnumPeriodicTasks, self).__init__(conf) - self.notifier = rpc.get_notifier() - - @periodic_task.periodic_task(spacing=10, run_immediately=True) - @set_context - def sync_cluster_status(self, ctx): - try: - LOG.debug('Starting to sync up cluster status') - - # get all the clusters that are IN_PROGRESS - status = [objects.fields.ClusterStatus.CREATE_IN_PROGRESS, - objects.fields.ClusterStatus.UPDATE_IN_PROGRESS, - objects.fields.ClusterStatus.DELETE_IN_PROGRESS, - objects.fields.ClusterStatus.ROLLBACK_IN_PROGRESS] - filters = {'status': status} - clusters = objects.Cluster.list(ctx, filters=filters) - if not clusters: - return - - # synchronize with underlying orchestration - for cluster in clusters: - job = ClusterUpdateJob(ctx, cluster) - # though this call isn't really looping, we use this - # abstraction anyway to avoid dealing directly with eventlet - # hooey - lc = loopingcall.FixedIntervalLoopingCall(f=job.update_status) - lc.start(1, stop_on_exception=True) - - except Exception as e: - LOG.warning( - "Ignore error [%s] when syncing up cluster status.", - e, exc_info=True) - - @periodic_task.periodic_task(run_immediately=True) - @set_context - def _send_cluster_metrics(self, ctx): - LOG.debug('Starting to send cluster metrics') - for cluster in objects.Cluster.list(ctx): - if cluster.status not in ( - objects.fields.ClusterStatus.CREATE_COMPLETE, - objects.fields.ClusterStatus.UPDATE_COMPLETE): - continue - - monitor = monitors.create_monitor(ctx, cluster) - if monitor is None: - continue - - try: - monitor.pull_data() - except Exception as e: - LOG.warning( - "Skip pulling data from cluster %(cluster)s due to " - "error: %(e)s", - {'e': e, 'cluster': cluster.uuid}, exc_info=True) - continue - - metrics = list() - for name in monitor.get_metric_names(): - try: - metric = { - 'name': name, - 'value': monitor.compute_metric_value(name), - 'unit': monitor.get_metric_unit(name), - } - metrics.append(metric) - except Exception as e: - LOG.warning("Skip adding metric %(name)s due to " - "error: %(e)s", - {'e': e, 'name': name}, exc_info=True) - - message = dict(metrics=metrics, - user_id=cluster.user_id, - project_id=cluster.project_id, - resource_id=cluster.uuid) - LOG.debug("About to send notification: '%s'", message) - self.notifier.info(ctx, "magnum.cluster.metrics.update", - message) - - -def setup(conf, tg): - pt = MagnumPeriodicTasks(conf) - tg.add_dynamic_timer( - pt.run_periodic_tasks, - periodic_interval_max=conf.periodic_interval_max, - context=None) diff --git a/magnum/servicegroup/__init__.py b/magnum/servicegroup/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/servicegroup/magnum_service_periodic.py b/magnum/servicegroup/magnum_service_periodic.py deleted file mode 100644 index f6d55b2b..00000000 --- a/magnum/servicegroup/magnum_service_periodic.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2015 - Yahoo! Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
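A sketch of how another job could follow the periodic-task pattern from magnum/service/periodic.py above; the class, task name, and spacing are invented for illustration. @set_context installs an all-tenants admin context before the task body runs and clears it afterwards.

from oslo_log import log
from oslo_service import periodic_task

from magnum import objects
from magnum.service.periodic import set_context

LOG = log.getLogger(__name__)


class ExtraPeriodicTasks(periodic_task.PeriodicTasks):

    @periodic_task.periodic_task(spacing=300)
    @set_context
    def _count_clusters(self, ctx):
        # ctx is the admin context installed by set_context, so the
        # listing is not scoped to a single tenant.
        LOG.debug('Tracking %d clusters', len(objects.Cluster.list(ctx)))

# Instantiate with a ConfigOpts object, e.g. ExtraPeriodicTasks(CONF), and
# wire it up the same way setup() does for MagnumPeriodicTasks.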
- -"""Magnum Service Layer""" - -from oslo_log import log -from oslo_service import periodic_task - -from magnum import objects -from magnum.service import periodic - - -LOG = log.getLogger(__name__) - - -class MagnumServicePeriodicTasks(periodic_task.PeriodicTasks): - '''Magnum periodic Task class - - Any periodic task job need to be added into this class - ''' - - def __init__(self, conf, binary): - self.magnum_service_ref = None - self.host = conf.host - self.binary = binary - super(MagnumServicePeriodicTasks, self).__init__(conf) - - @periodic_task.periodic_task(run_immediately=True) - @periodic.set_context - def update_magnum_service(self, ctx): - LOG.debug('Update magnum_service') - if self.magnum_service_ref is None: - self.magnum_service_ref = \ - objects.MagnumService.get_by_host_and_binary( - ctx, self.host, self.binary) - if self.magnum_service_ref is None: - magnum_service_dict = { - 'host': self.host, - 'binary': self.binary - } - self.magnum_service_ref = objects.MagnumService( - ctx, **magnum_service_dict) - self.magnum_service_ref.create() - self.magnum_service_ref.report_state_up() - - -def setup(conf, binary, tg): - pt = MagnumServicePeriodicTasks(conf, binary) - tg.add_dynamic_timer( - pt.run_periodic_tasks, - periodic_interval_max=conf.periodic_interval_max, - context=None) diff --git a/magnum/tests/__init__.py b/magnum/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/base.py b/magnum/tests/base.py deleted file mode 100644 index dbd4db22..00000000 --- a/magnum/tests/base.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import os - -import fixtures -import mock -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from oslotest import base -import pecan -import testscenarios - -from magnum.common import context as magnum_context -from magnum.common import keystone as magnum_keystone -from magnum.objects import base as objects_base -from magnum.tests import conf_fixture -from magnum.tests import fake_notifier -from magnum.tests import policy_fixture - - -CONF = cfg.CONF -try: - log.register_options(CONF) -except cfg.ArgsAlreadyParsedError: - pass -CONF.set_override('use_stderr', False) - - -class BaseTestCase(testscenarios.WithScenarios, base.BaseTestCase): - """Test base class.""" - - def setUp(self): - super(BaseTestCase, self).setUp() - self.addCleanup(cfg.CONF.reset) - - -class TestCase(base.BaseTestCase): - """Test case base class for all unit tests.""" - - def setUp(self): - super(TestCase, self).setUp() - token_info = { - 'token': { - 'project': { - 'id': 'fake_project' - }, - 'user': { - 'id': 'fake_user' - } - } - } - - trustee_domain_id = '12345678-9012-3456-7890-123456789abc' - - self.context = magnum_context.RequestContext( - auth_token_info=token_info, - project_id='fake_project', - user_id='fake_user', - is_admin=False) - - self.global_mocks = {} - - self.keystone_client = magnum_keystone.KeystoneClientV3(self.context) - - self.policy = self.useFixture(policy_fixture.PolicyFixture()) - - self.useFixture(fixtures.MockPatchObject( - oslo_messaging, 'Notifier', - fake_notifier.FakeNotifier)) - self.addCleanup(fake_notifier.reset) - - def make_context(*args, **kwargs): - # If context hasn't been constructed with token_info - if not kwargs.get('auth_token_info'): - kwargs['auth_token_info'] = copy.deepcopy(token_info) - if not kwargs.get('project_id'): - kwargs['project_id'] = 'fake_project' - if not kwargs.get('user_id'): - kwargs['user_id'] = 'fake_user' - if not kwargs.get('is_admin'): - kwargs['is_admin'] = False - - context = magnum_context.RequestContext(*args, **kwargs) - return magnum_context.RequestContext.from_dict(context.to_dict()) - - p = mock.patch.object(magnum_context, 'make_context', - side_effect=make_context) - - self.global_mocks['magnum.common.context.make_context'] = p - - q = mock.patch.object(magnum_keystone.KeystoneClientV3, - 'trustee_domain_id', - return_value=trustee_domain_id) - - self.global_mocks[ - 'magnum.common.keystone.KeystoneClientV3.trustee_domain_id'] = q - - self.mock_make_context = p.start() - self.addCleanup(p.stop) - - self.mock_make_trustee_domain_id = q.start() - self.addCleanup(q.stop) - - self.useFixture(conf_fixture.ConfFixture()) - self.useFixture(fixtures.NestedTempfile()) - - self._base_test_obj_backup = copy.copy( - objects_base.MagnumObjectRegistry._registry._obj_classes) - self.addCleanup(self._restore_obj_registry) - - def reset_pecan(): - pecan.set_config({}, overwrite=True) - - self.addCleanup(reset_pecan) - - def start_global(self, name): - self.global_mocks[name].start() - - def stop_global(self, name): - self.global_mocks[name].stop() - - def _restore_obj_registry(self): - objects_base.MagnumObjectRegistry._registry._obj_classes \ - = self._base_test_obj_backup - - def config(self, **kw): - """Override config options for a test.""" - group = kw.pop('group', None) - for k, v in kw.items(): - CONF.set_override(k, v, group) - - def get_path(self, project_file=None): - """Get the absolute path to a file. Used for testing the API. - - :param project_file: File whose path to return. Default: None. 
- :returns: path to the specified file, or path to project root. - """ - root = os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..', - ) - ) - if project_file: - return os.path.join(root, project_file) - else: - return root diff --git a/magnum/tests/conf_fixture.py b/magnum/tests/conf_fixture.py deleted file mode 100644 index 8e888fe1..00000000 --- a/magnum/tests/conf_fixture.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures - -from magnum.common import config -import magnum.conf - -CONF = magnum.conf.CONF - - -class ConfFixture(fixtures.Fixture): - """Fixture to manage global conf settings.""" - - def _setUp(self): - CONF.set_default('host', 'fake-mini') - CONF.set_default('connection', "sqlite://", group='database') - CONF.set_default('sqlite_synchronous', False, group='database') - config.parse_args([], default_config_files=[]) - self.addCleanup(CONF.reset) diff --git a/magnum/tests/contrib/copy_instance_logs.sh b/magnum/tests/contrib/copy_instance_logs.sh deleted file mode 100755 index e98dc4ed..00000000 --- a/magnum/tests/contrib/copy_instance_logs.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set -o xtrace - -echo "Magnum's copy_instance_logs.sh was called..." 
- -SSH_IP=$1 -COE=${2-kubernetes} -NODE_TYPE=${3-master} -LOG_PATH=/opt/stack/logs/cluster-nodes/${NODE_TYPE}-${SSH_IP} -KEYPAIR=${4-default} -PRIVATE_KEY= - -echo "If private key is specified, save to temp and use that; else, use default" -if [[ "$KEYPAIR" == "default" ]]; then - PRIVATE_KEY=$(readlink -f ~/.ssh/id_rsa_magnum) -else - PRIVATE_KEY="$(mktemp id_rsa_magnum.$SSH_IP.XXX)" - echo -en "$KEYPAIR" > $PRIVATE_KEY -fi - -function remote_exec { - local ssh_user=$1 - local cmd=$2 - local logfile=${LOG_PATH}/$3 - ssh -i $PRIVATE_KEY -o StrictHostKeyChecking=no ${ssh_user}@${SSH_IP} "${cmd}" > ${logfile} 2>&1 -} - -mkdir -p $LOG_PATH - -cat /proc/cpuinfo > /opt/stack/logs/cpuinfo.log - -if [[ "$COE" == "kubernetes" ]]; then - SSH_USER=fedora - remote_exec $SSH_USER "sudo systemctl --full list-units --no-pager" systemctl_list_units.log - remote_exec $SSH_USER "sudo journalctl -u cloud-config --no-pager" cloud-config.log - remote_exec $SSH_USER "sudo journalctl -u cloud-final --no-pager" cloud-final.log - remote_exec $SSH_USER "sudo journalctl -u cloud-init-local --no-pager" cloud-init-local.log - remote_exec $SSH_USER "sudo journalctl -u cloud-init --no-pager" cloud-init.log - remote_exec $SSH_USER "sudo cat /var/log/cloud-init-output.log" cloud-init-output.log - remote_exec $SSH_USER "sudo journalctl -u kubelet --no-pager" kubelet.log - remote_exec $SSH_USER "sudo journalctl -u kube-proxy --no-pager" kube-proxy.log - remote_exec $SSH_USER "sudo journalctl -u etcd --no-pager" etcd.log - remote_exec $SSH_USER "sudo journalctl -u kube-apiserver --no-pager" kube-apiserver.log - remote_exec $SSH_USER "kubectl logs --namespace=kube-system \$(kubectl --namespace=kube-system get pods | grep kube-scheduler | awk '{print \$1}')" kube-scheduler.log - remote_exec $SSH_USER "kubectl logs --namespace=kube-system \$(kubectl --namespace=kube-system get pods | grep kube-controller-manager | awk '{print \$1}')" kube-controller-manager.log - remote_exec $SSH_USER "sudo journalctl -u docker-storage-setup --no-pager" docker-storage-setup.log - remote_exec $SSH_USER "sudo systemctl status docker-storage-setup -l" docker-storage-setup.service.status.log - remote_exec $SSH_USER "sudo systemctl show docker-storage-setup --no-pager" docker-storage-setup.service.show.log - remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage-setup 2>/dev/null" docker-storage-setup.sysconfig.env.log - remote_exec $SSH_USER "sudo journalctl -u docker --no-pager" docker.log - remote_exec $SSH_USER "sudo systemctl status docker -l" docker.service.status.log - remote_exec $SSH_USER "sudo systemctl show docker --no-pager" docker.service.show.log - remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker" docker.sysconfig.env.log - remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage" docker-storage.sysconfig.env.log - remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-network" docker-network.sysconfig.env.log - remote_exec $SSH_USER "sudo timeout 60s docker ps --all=true --no-trunc=true" docker-containers.log - remote_exec $SSH_USER "sudo tar zcvf - /var/lib/docker/containers 2>/dev/null" docker-container-configs.tar.gz - remote_exec $SSH_USER "sudo journalctl -u flanneld --no-pager" flanneld.log - remote_exec $SSH_USER "sudo ip a" ipa.log - remote_exec $SSH_USER "sudo netstat -an" netstat.log - remote_exec $SSH_USER "sudo df -h" dfh.log - remote_exec $SSH_USER "sudo journalctl -u wc-notify --no-pager" wc-notify.log - remote_exec $SSH_USER "sudo cat /etc/sysconfig/heat-params" heat-params -elif [[ "$COE" == "swarm" || 
"$COE" == "swarm-mode" ]]; then - SSH_USER=fedora - remote_exec $SSH_USER "sudo systemctl --full list-units --no-pager" systemctl_list_units.log - remote_exec $SSH_USER "sudo journalctl -u cloud-config --no-pager" cloud-config.log - remote_exec $SSH_USER "sudo journalctl -u cloud-final --no-pager" cloud-final.log - remote_exec $SSH_USER "sudo journalctl -u cloud-init-local --no-pager" cloud-init-local.log - remote_exec $SSH_USER "sudo journalctl -u cloud-init --no-pager" cloud-init.log - remote_exec $SSH_USER "sudo cat /var/log/cloud-init-output.log" cloud-init-output.log - remote_exec $SSH_USER "sudo journalctl -u etcd --no-pager" etcd.log - remote_exec $SSH_USER "sudo journalctl -u swarm-manager --no-pager" swarm-manager.log - remote_exec $SSH_USER "sudo journalctl -u swarm-agent --no-pager" swarm-agent.log - remote_exec $SSH_USER "sudo journalctl -u swarm-worker --no-pager" swarm-worker.log - remote_exec $SSH_USER "sudo journalctl -u docker-storage-setup --no-pager" docker-storage-setup.log - remote_exec $SSH_USER "sudo systemctl status docker-storage-setup -l" docker-storage-setup.service.status.log - remote_exec $SSH_USER "sudo systemctl show docker-storage-setup --no-pager" docker-storage-setup.service.show.log - remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage-setup 2>/dev/null" docker-storage-setup.sysconfig.env.log - remote_exec $SSH_USER "sudo journalctl -u docker --no-pager" docker.log - remote_exec $SSH_USER "sudo journalctl -u docker-containerd --no-pager" docker-containerd.log - remote_exec $SSH_USER "sudo systemctl status docker.socket -l" docker.socket.status.log - remote_exec $SSH_USER "sudo systemctl show docker.socket --no-pager" docker.socket.show.log - remote_exec $SSH_USER "sudo systemctl status docker -l" docker.service.status.log - remote_exec $SSH_USER "sudo systemctl show docker --no-pager" docker.service.show.log - remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker" docker.sysconfig.env.log - remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage" docker-storage.sysconfig.env.log - remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-network" docker-network.sysconfig.env.log - remote_exec $SSH_USER "sudo timeout 60s docker ps --all=true --no-trunc=true" docker-containers.log - remote_exec $SSH_USER "sudo tar zcvf - /var/lib/docker/containers 2>/dev/null" docker-container-configs.tar.gz - remote_exec $SSH_USER "sudo journalctl -u flanneld --no-pager" flanneld.log - remote_exec $SSH_USER "sudo ip a" ipa.log - remote_exec $SSH_USER "sudo netstat -an" netstat.log - remote_exec $SSH_USER "sudo df -h" dfh.log - remote_exec $SSH_USER "sudo cat /etc/sysconfig/heat-params" heat-params -else - echo "ERROR: Unknown COE '${COE}'" - EXIT_CODE=1 -fi - -# Restore xtrace -$XTRACE - -exit $EXIT_CODE diff --git a/magnum/tests/contrib/gate_hook.sh b/magnum/tests/contrib/gate_hook.sh deleted file mode 100755 index 3abdbec0..00000000 --- a/magnum/tests/contrib/gate_hook.sh +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/bash -x -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# This script is executed inside gate_hook function in devstack gate. - - -coe=$1 -special=$2 - -export PROJECTS="openstack/barbican $PROJECTS" -export DEVSTACK_LOCAL_CONFIG="enable_plugin heat git://git.openstack.org/openstack/heat" - -export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service horizon" -export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-account" -export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-container" -export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-object" -export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-proxy" -export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-acentral" -export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-acompute" -export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-alarm-evaluator" -export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-alarm-notifier" -export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-api" -export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service ceilometer-collector" - -if egrep --quiet '(vmx|svm)' /proc/cpuinfo; then - export DEVSTACK_GATE_LIBVIRT_TYPE=kvm -fi - - -if [ "$coe" = "mesos" ]; then - export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_GUEST_IMAGE_URL=https://fedorapeople.org/groups/magnum/ubuntu-mesos-ocata.qcow2" -elif [ "$coe" = "k8s-coreos" ]; then - export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_GUEST_IMAGE_URL=http://beta.release.core-os.net/amd64-usr/current/coreos_production_openstack_image.img.bz2" -elif [ "${coe}${special}" = "k8s-ironic" ]; then - export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_GUEST_IMAGE_URL='https://fedorapeople.org/groups/magnum/fedora-kubernetes-ironic-latest.tar.gz'" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_IMAGE_NAME='fedora-kubernetes-ironic-latest'" - - export DEVSTACK_GATE_VIRT_DRIVER="ironic" - # NOTE(strigazi) keep cinder - # export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service cinder c-sch c-api c-vol" - - export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ironic git://git.openstack.org/openstack/ironic" - - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_DEPLOY_DRIVER=pxe_ipmitool" - # NOTE(ykarel) Ironic to work with magnum, requires devstack to be configured with IP_VERSION=4 - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IP_VERSION=4" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_BAREMETAL_BASIC_OPS=True" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_LOG_DIR=/opt/stack/new/ironic-bm-logs" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"DEFAULT_INSTANCE_TYPE=baremetal" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"BUILD_TIMEOUT=600" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_CALLBACK_TIMEOUT=600" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_AGENT=openvswitch" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_ML2_TENANT_NETWORK_TYPE=vxlan" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_BUILD_DEPLOY_RAMDISK=False" - - # We don't enable swift in Gate Jobs so not required - # export DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_ENABLE_TEMPURLS=True" - # export DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_TEMPURL_KEY=password" - # export DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_HASH=password" - - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_ENABLED_DRIVERS=fake,agent_ipmitool,pxe_ipmitool" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"VOLUME_BACKING_FILE_SIZE=24G" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"FORCE_CONFIG_DRIVE=True" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=tinyipa" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_IPXE_ENABLED=False" - export 
DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_COUNT=2" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SSH_PORT=22" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=1024" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_DISK=10" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_EPHEMERAL_DISK=5" -else - export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_GUEST_IMAGE_URL='https://fedorapeople.org/groups/magnum/fedora-atomic-latest.qcow2'" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_IMAGE_NAME='fedora-atomic-latest'" -fi - -# Enable magnum plugin in the last step -export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin magnum git://git.openstack.org/openstack/magnum" - -$BASE/new/devstack-gate/devstack-vm-gate.sh diff --git a/magnum/tests/contrib/post_test_hook.sh b/magnum/tests/contrib/post_test_hook.sh deleted file mode 100755 index 7ac82f85..00000000 --- a/magnum/tests/contrib/post_test_hook.sh +++ /dev/null @@ -1,236 +0,0 @@ -#!/bin/bash -x -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This script is executed inside post_test_hook function in devstack gate. - -# Sleep some time until all services are starting -sleep 5 - -# Check if a function already exists -function function_exists { - declare -f -F $1 > /dev/null -} - -# Set up all necessary test data -function create_test_data { - # First we test Magnum's command line to see if we can stand up - # a cluster_template, cluster and a pod - - coe=$1 - special=$2 - if [ $coe == 'mesos' ]; then - local image_name="ubuntu.*mesos" - local container_format="bare" - elif [ $coe == 'k8s-coreos' ]; then - local image_name="coreos" - local container_format="bare" - elif [ "${coe}${special}" == 'k8s-ironic' ]; then - local bm_flavor_id=$(openstack flavor show baremetal -f value -c id) - die_if_not_set $LINENO bm_flavor_id "Failed to get id of baremetal flavor" - - # NOTE(yuanying): Workaround fix for ironic issue - # cf. https://bugs.launchpad.net/ironic/+bug/1596421 - echo "alter table ironic.nodes modify instance_info LONGTEXT;" | mysql -uroot -p${MYSQL_PASSWORD} ironic - # NOTE(yuanying): Ironic instances need to connect to Internet - openstack subnet set private-subnet --dns-nameserver 8.8.8.8 - local container_format="ami" - else - local image_name="atomic" - local container_format="bare" - fi - - # if we have the MAGNUM_IMAGE_NAME setting, use it instead - # of the default one. In combination with MAGNUM_GUEST_IMAGE_URL - # setting, it allows to perform testing on custom images. - image_name=${MAGNUM_IMAGE_NAME:-$image_name} - - export NIC_ID=$(openstack network show public -f value -c id) - - # We need to filter by container_format to get the appropriate - # image. Specifically, when we provide kernel and ramdisk images - # we need to select the 'ami' image. Otherwise, when we have - # qcow2 images, the format is 'bare'. 
-    export IMAGE_ID=$(openstack image list --property container_format=$container_format | grep -i $image_name | awk '{print $2}')
-
-    # Get magnum_url
-    local magnum_api_ip=$(iniget /etc/magnum/magnum.conf api host)
-    local magnum_api_port=$(iniget /etc/magnum/magnum.conf api port)
-    local magnum_url="http://"$magnum_api_ip":"$magnum_api_port"/v1"
-    local keystone_auth_url=$(iniget /etc/magnum/magnum.conf keystone_authtoken auth_uri)
-
-    # pass the appropriate variables via a config file (heredoc)
-    CREDS_FILE=$MAGNUM_DIR/functional_creds.conf
-    cat <<EOF > $CREDS_FILE
-# Credentials for functional testing
-
-[auth]
-auth_url = $keystone_auth_url
-magnum_url = $magnum_url
-username = $OS_USERNAME
-project_name = $OS_PROJECT_NAME
-project_domain_id = $OS_PROJECT_DOMAIN_ID
-user_domain_id = $OS_USER_DOMAIN_ID
-password = $OS_PASSWORD
-auth_version = v3
-insecure = False
-[admin]
-user = $OS_USERNAME
-project_name = $OS_PROJECT_NAME
-project_domain_id = $OS_PROJECT_DOMAIN_ID
-user_domain_id = $OS_USER_DOMAIN_ID
-pass = $OS_PASSWORD
-region_name = $OS_REGION_NAME
-[magnum]
-image_id = $IMAGE_ID
-nic_id = $NIC_ID
-keypair_id = default
-flavor_id = ${bm_flavor_id:-s1.magnum}
-master_flavor_id = ${bm_flavor_id:-m1.magnum}
-copy_logs = true
-dns_nameserver = 8.8.8.8
-EOF
-
-    # Note(eliqiao): Let's keep this only for debugging on gate.
-    echo_summary $CREDS_FILE
-    cat $CREDS_FILE
-
-    # Create a keypair for use in the functional tests.
-    echo_summary "Generate a key-pair"
-    # ~/.ssh/id_rsa already exists in multinode setup, so generate
-    # key with different name
-    ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa_magnum
-    openstack keypair create --public-key ~/.ssh/id_rsa_magnum.pub default
-}
-
-function add_flavor {
-    # because of policy.json change in nova, flavor-create is now an admin-only feature
-    # moving this out to only be used by admins
-
-    # Get admin credentials
-    pushd ../devstack
-    source openrc admin admin
-    popd
-
-    # Create magnum specific flavor for use in functional tests.
-    echo_summary "Create a flavor"
-    if [[ "$DEVSTACK_GATE_TOPOLOGY" = "multinode" ]] ; then
-        local flavor_ram="3750"
-        local flavor_disk="20"
-        local flavor_vcpus="2"
-    fi
-
-    openstack flavor create m1.magnum --id 100 --ram ${flavor_ram:-1024} --disk ${flavor_disk:-10} --vcpus ${flavor_vcpus:-1}
-    openstack flavor create s1.magnum --id 200 --ram ${flavor_ram:-1024} --disk ${flavor_disk:-10} --vcpus ${flavor_vcpus:-1}
-}
-
-if ! function_exists echo_summary; then
-    function echo_summary {
-        echo $@
-    }
-fi
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set -o xtrace
-
-echo_summary "magnum's post_test_hook.sh was called..."
-(set -o posix; set)
-
-# source it to make sure to get REQUIREMENTS_DIR
-source $BASE/new/devstack/stackrc
-
-constraints="-c $REQUIREMENTS_DIR/upper-constraints.txt"
-sudo -H pip install $constraints -U -r requirements.txt -r test-requirements.txt
-
-export MAGNUM_DIR="$BASE/new/magnum"
-sudo chown -R jenkins:stack $MAGNUM_DIR
-
-# Run functional tests
-# Currently we support functional-api, functional-k8s, will support swarm,
-# mesos later.
- -echo "Running magnum functional test suite for $1" - -# For api, we will run tempest tests - -coe=$1 -special=$2 - -if [[ "-ironic" != "$special" ]]; then - add_flavor -fi - -if [[ "api" == "$coe" ]]; then - # Import devstack functions 'iniset', 'iniget' and 'trueorfalse' - source $BASE/new/devstack/functions - echo "TEMPEST_SERVICES+=,magnum" >> $localrc_path - pushd $BASE/new/tempest - sudo chown -R jenkins:stack $BASE/new/tempest - - # Set demo credentials - source $BASE/new/devstack/accrc/demo/demo - - create_test_data $coe - - # Set up tempest config with magnum goodness - iniset $BASE/new/tempest/etc/tempest.conf magnum image_id $IMAGE_ID - iniset $BASE/new/tempest/etc/tempest.conf magnum nic_id $NIC_ID - iniset $BASE/new/tempest/etc/tempest.conf magnum keypair_id default - iniset $BASE/new/tempest/etc/tempest.conf magnum flavor_id s1.magnum - iniset $BASE/new/tempest/etc/tempest.conf magnum master_flavor_id m1.magnum - iniset $BASE/new/tempest/etc/tempest.conf magnum copy_logs True - - # show tempest config with magnum - cat etc/tempest.conf - - # Set up concurrency and test regex - export MAGNUM_TEMPEST_CONCURRENCY=${MAGNUM_TEMPEST_CONCURRENCY:-1} - export MAGNUM_TESTS=${MAGNUM_TESTS:-'magnum.tests.functional.api.v1'} - - echo "Running tempest magnum test suites" - sudo -H -u jenkins tox -eall-plugin -- $MAGNUM_TESTS --concurrency=$MAGNUM_TEMPEST_CONCURRENCY -else - # Get admin credentials - pushd ../devstack - source openrc admin admin - popd - - create_test_data $coe $special - - target="${coe}${special}" - sudo -E -H -u jenkins tox -e functional-"$target" -- --concurrency=1 -fi -EXIT_CODE=$? - -# Delete the keypair used in the functional test. -echo_summary "Running keypair-delete" -openstack keypair delete default - -if [[ "-ironic" != "$special" ]]; then - # Delete the flavor used in the functional test. - echo_summary "Running flavor-delete" - openstack flavor delete m1.magnum - openstack flavor delete s1.magnum -fi - -# Save functional testing log -sudo cp $MAGNUM_DIR/functional-tests.log /opt/stack/logs/ - -# Save functional_creds.conf -sudo cp $CREDS_FILE /opt/stack/logs/ - -# Restore xtrace -$XTRACE - -exit $EXIT_CODE diff --git a/magnum/tests/fake_notifier.py b/magnum/tests/fake_notifier.py deleted file mode 100644 index 69bc264e..00000000 --- a/magnum/tests/fake_notifier.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2016 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
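-
-# Illustrative use in a unit test (a sketch based on the fakes below, not an
-# excerpt from a real test; the event type and payload are made up):
-#
-#   notifier = FakeNotifier(transport=None)
-#   notifier.info(ctxt, 'magnum.cluster.create', {'uuid': 'some-uuid'})
-#   self.assertEqual('magnum.cluster.create', NOTIFICATIONS[-1].event_type)
-#   reset()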
- -import collections -import functools - - -NOTIFICATIONS = [] - - -def reset(): - del NOTIFICATIONS[:] - - -FakeMessage = collections.namedtuple('Message', [ - 'publisher_id', 'priority', 'event_type', 'payload', 'context']) - - -class FakeNotifier(object): - - def __init__(self, transport, publisher_id=None, driver=None, - topic=None, serializer=None, retry=None): - self.transport = transport - self.publisher_id = publisher_id or 'fake.id' - for priority in ('debug', 'info', 'warn', 'error', 'critical'): - setattr( - self, priority, - functools.partial(self._notify, priority=priority.upper())) - - def prepare(self, publisher_id=None): - if publisher_id is None: - publisher_id = self.publisher_id - return self.__class__(self.transport, publisher_id=publisher_id) - - def _notify(self, ctxt, event_type, payload, priority): - msg = FakeMessage(self.publisher_id, priority, event_type, - payload, ctxt) - NOTIFICATIONS.append(msg) diff --git a/magnum/tests/fake_policy.py b/magnum/tests/fake_policy.py deleted file mode 100644 index b051e5cb..00000000 --- a/magnum/tests/fake_policy.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -policy_data = """ -{ - "context_is_admin": "role:admin", - "admin_or_owner": "is_admin:True or project_id:%(project_id)s", - "default": "rule:admin_or_owner", - "admin_api": "rule:context_is_admin", - - "bay:create": "", - "bay:delete": "", - "bay:detail": "", - "bay:get": "", - "bay:get_all": "", - "bay:update": "", - - "baymodel:create": "", - "baymodel:delete": "", - "baymodel:detail": "", - "baymodel:get": "", - "baymodel:get_all": "", - "baymodel:update": "", - - "cluster:create": "", - "cluster:delete": "", - "cluster:detail": "", - "cluster:get": "", - "cluster:get_all": "", - "cluster:update": "", - - "clustertemplate:create": "", - "clustertemplate:delete": "", - "clustertemplate:detail": "", - "clustertemplate:get": "", - "clustertemplate:get_all": "", - "clustertemplate:update": "", - "clustertemplate:publish": "", - - "certificate:create": "", - "certificate:get": "", - - "magnum-service:get_all": "", - "stats:get_all": "" -} -""" diff --git a/magnum/tests/fakes.py b/magnum/tests/fakes.py deleted file mode 100644 index 36b5f55c..00000000 --- a/magnum/tests/fakes.py +++ /dev/null @@ -1,126 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
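-
-# Illustrative sketch of how FakeLoopingCall (defined below) stands in for
-# oslo_service.loopingcall in unit tests (the callable name is hypothetical):
-#
-#   call = FakeLoopingCall(f=poll_once)   # poll_once raises LoopingCallDone
-#   call.start(interval=0)                # returns 0 once the call is done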
-
-import mock
-import time
-
-from oslo_service import loopingcall
-
-fakeAuthTokenHeaders = {'X-User-Id': u'773a902f022949619b5c2f32cd89d419',
-                        'X-Project-Id': u'5588aebbcdc24e17a061595f80574376',
-                        'X-Project-Name': 'test',
-                        'X-User-Name': 'test',
-                        'X-Auth-Token': u'5588aebbcdc24e17a061595f80574376',
-                        'X-Forwarded-For': u'10.10.10.10, 11.11.11.11',
-                        'X-Service-Catalog': u'{test: 12345}',
-                        'X-Roles': 'role1,role2',
-                        'X-Auth-Url': 'fake_auth_url',
-                        'X-Identity-Status': 'Confirmed',
-                        'X-User-Domain-Name': 'domain',
-                        'X-Project-Domain-Id': 'project_domain_id',
-                        'X-User-Domain-Id': 'user_domain_id',
-                        'OpenStack-API-Version': 'container-infra 1.0'
-                        }
-
-
-class FakePecanRequest(mock.Mock):
-
-    def __init__(self, **kwargs):
-        super(FakePecanRequest, self).__init__(**kwargs)
-        self.host_url = 'http://test_url:8080/test'
-        self.context = {}
-        self.body = ''
-        self.content_type = 'text/unicode'
-        self.params = {}
-        self.path = '/v1/services'
-        self.headers = fakeAuthTokenHeaders
-        self.environ = {}
-        self.version = (1, 0)
-
-    def __setitem__(self, index, value):
-        setattr(self, index, value)
-
-
-class FakePecanResponse(mock.Mock):
-
-    def __init__(self, **kwargs):
-        super(FakePecanResponse, self).__init__(**kwargs)
-        self.status = None
-
-
-class FakeApp(object):
-    pass
-
-
-class FakeService(mock.Mock):
-    def __init__(self, **kwargs):
-        super(FakeService, self).__init__(**kwargs)
-        self.__tablename__ = 'service'
-        self.__resource__ = 'services'
-        self.user_id = 'fake user id'
-        self.project_id = 'fake project id'
-        self.uuid = 'test_uuid'
-        self.id = 8
-        self.name = 'james'
-        self.service_type = 'not_this'
-        self.description = 'amazing'
-        self.tags = ['this', 'and that']
-        self.read_only = True
-
-    def as_dict(self):
-        return dict(service_type=self.service_type,
-                    user_id=self.user_id,
-                    project_id=self.project_id,
-                    uuid=self.uuid,
-                    id=self.id,
-                    name=self.name,
-                    tags=self.tags,
-                    read_only=self.read_only,
-                    description=self.description)
-
-
-class FakeAuthProtocol(mock.Mock):
-
-    def __init__(self, **kwargs):
-        super(FakeAuthProtocol, self).__init__(**kwargs)
-        self.app = FakeApp()
-        self.config = ''
-
-
-class FakeLoopingCall(object):
-    '''Fake a looping call without the eventlet machinery.
-
-    For tests, just provide a simple implementation so that we can verify
-    the called logic works, rather than testing LoopingCall itself.
-    '''
-
-    def __init__(self, **kwargs):
-        func = kwargs.pop("f", None)
-        if func is None:
-            raise ValueError("Must pass a callable as the 'f' kwarg.")
-        self.call_func = func
-
-    def start(self, interval, **kwargs):
-        # Default to no delay, and only sleep for a positive delay:
-        # time.sleep() raises ValueError for negative values.
-        initial_delay = kwargs.pop("initial_delay", 0)
-        stop_on_exception = kwargs.pop("stop_on_exception", True)
-        if initial_delay > 0:
-            time.sleep(initial_delay)
-        while True:
-            try:
-                self.call_func()
-            except loopingcall.LoopingCallDone:
-                return 0
-            except Exception as exc:
-                if stop_on_exception:
-                    raise exc
-                if interval:
-                    time.sleep(interval)
diff --git a/magnum/tests/functional/__init__.py b/magnum/tests/functional/__init__.py
deleted file mode 100644
index f969c2af..00000000
--- a/magnum/tests/functional/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -logging.basicConfig( - filename='functional-tests.log', - filemode='w', - level=logging.DEBUG, -) diff --git a/magnum/tests/functional/api/__init__.py b/magnum/tests/functional/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/functional/api/base.py b/magnum/tests/functional/api/base.py deleted file mode 100755 index da030eae..00000000 --- a/magnum/tests/functional/api/base.py +++ /dev/null @@ -1,161 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect -import logging - -from tempest.common import credentials_factory as common_creds - -from magnum.tests.functional.common import base -from magnum.tests.functional.common import config -from magnum.tests.functional.common import manager - - -COPY_LOG_HELPER = "magnum/tests/contrib/copy_instance_logs.sh" - - -class BaseTempestTest(base.BaseMagnumTest): - """Sets up configuration required for functional tests""" - - ic_class_list = [] - ic_method_list = [] - LOG = logging.getLogger(__name__) - - def __init__(self, *args, **kwargs): - super(BaseTempestTest, self).__init__(*args, **kwargs) - - @classmethod - def setUpClass(cls): - super(BaseTempestTest, cls).setUpClass() - config.Config.setUp() - - @classmethod - def tearDownClass(cls): - super(BaseTempestTest, cls).tearDownClass() - cls.clear_credentials(clear_class_creds=True) - - def tearDown(self): - super(BaseTempestTest, self).tearDown() - self.clear_credentials(clear_method_creds=True) - - @classmethod - def clear_credentials(cls, - clear_class_creds=False, - clear_method_creds=False): - if clear_class_creds: - for ic in cls.ic_class_list: - ic.clear_creds() - if clear_method_creds: - for ic in cls.ic_method_list: - ic.clear_creds() - - @classmethod - def get_credentials(cls, name=None, - type_of_creds="default", - class_cleanup=False): - (creds, _) = cls.get_credentials_with_keypair(name, type_of_creds, - class_cleanup) - return creds - - @classmethod - def get_credentials_with_keypair(cls, name=None, - type_of_creds="default", - class_cleanup=False): - if name is None: - # Get name of test method - name = inspect.stack()[1][3] - if len(name) > 32: - name = name[0:32] - - # Choose type of isolated creds - ic = common_creds.get_credentials_provider( - name, - identity_version=config.Config.auth_version - ) - - if class_cleanup: - cls.ic_class_list.append(ic) - else: - cls.ic_method_list.append(ic) - - creds = None - if "admin" == type_of_creds: - creds = ic.get_admin_creds() - elif "alt" == type_of_creds: - creds = ic.get_alt_creds() - elif "default" == type_of_creds: - creds = ic.get_primary_creds() - 
else:
-            # The original code here referenced the nonexistent attribute
-            # ``ic.self``; fall through to the provider's generic lookup.
-            creds = ic.get_credentials(type_of_creds)
-
-        _, keypairs_client = cls.get_clients(
-            creds, type_of_creds, 'keypair_setup')
-
-        keypair = None
-        try:
-            keypairs_client.show_keypair(config.Config.keypair_id)
-        except Exception:
-            keypair_body = keypairs_client.create_keypair(
-                name=config.Config.keypair_id)
-            cls.LOG.debug("Keypair body: %s", keypair_body)
-            keypair = keypair_body['keypair']['private_key']
-        return (creds, keypair)
-
-    @classmethod
-    def get_clients(cls, creds, type_of_creds, request_type):
-        if "admin" == type_of_creds:
-            manager_inst = manager.AdminManager(credentials=creds.credentials,
-                                                request_type=request_type)
-        elif "alt" == type_of_creds:
-            manager_inst = manager.AltManager(credentials=creds.credentials,
-                                              request_type=request_type)
-        else:
-            # "default" and any other credential type use the DefaultManager
-            manager_inst = manager.DefaultManager(
-                credentials=creds.credentials, request_type=request_type)
-
-        # create client with isolated creds
-        return (manager_inst.client, manager_inst.keypairs_client)
-
-    @classmethod
-    def get_clients_with_existing_creds(cls,
-                                        name=None,
-                                        creds=None,
-                                        type_of_creds="default",
-                                        request_type=None,
-                                        class_cleanup=False):
-        if creds is None:
-            return cls.get_clients_with_new_creds(name,
-                                                  type_of_creds,
-                                                  request_type,
-                                                  class_cleanup)
-        else:
-            return cls.get_clients(creds, type_of_creds, request_type)
-
-    @classmethod
-    def get_clients_with_new_creds(cls,
-                                   name=None,
-                                   type_of_creds="default",
-                                   request_type=None,
-                                   class_cleanup=False):
-        """Creates isolated creds.
-
-        :param name: name, will be used for dynamic creds
-        :param type_of_creds: admin, alt or default
-        :param request_type: ClusterTemplate or service
-        :returns: MagnumClient -- client with isolated creds.
-        :returns: KeypairClient -- allows for creation of keypairs
-        """
-        creds = cls.get_credentials(name, type_of_creds, class_cleanup)
-        return cls.get_clients(creds, type_of_creds, request_type)
diff --git a/magnum/tests/functional/api/v1/__init__.py b/magnum/tests/functional/api/v1/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/magnum/tests/functional/api/v1/clients/__init__.py b/magnum/tests/functional/api/v1/clients/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/magnum/tests/functional/api/v1/clients/bay_client.py b/magnum/tests/functional/api/v1/clients/bay_client.py
deleted file mode 100755
index 844ed4a2..00000000
--- a/magnum/tests/functional/api/v1/clients/bay_client.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
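-
-# Illustrative lifecycle (a sketch only; `client` is a BayClient obtained via
-# the functional-test managers, and `bay_model` is a prepared bay entity):
-#
-#   resp, bay = client.post_bay(bay_model)
-#   client.wait_for_created_bay(bay.uuid)
-#   resp, bay = client.get_bay(bay.uuid)
-#   client.delete_bay(bay.uuid)
-#   client.wait_for_bay_to_delete(bay.uuid)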
- -from oslo_log import log as logging -from tempest.lib import exceptions - -from magnum.tests.functional.api.v1.models import bay_model -from magnum.tests.functional.common import client -from magnum.tests.functional.common import utils - - -class BayClient(client.MagnumClient): - """Encapsulates REST calls and maps JSON to/from models""" - - LOG = logging.getLogger(__name__) - - @classmethod - def bays_uri(cls, filters=None): - """Construct bays uri with optional filters - - :param filters: Optional k:v dict that's converted to url query - :returns: url string - """ - - url = "/bays" - if filters: - url = cls.add_filters(url, filters) - return url - - @classmethod - def bay_uri(cls, bay_id): - """Construct bay uri - - :param bay_id: bay uuid or name - :returns: url string - """ - - return "{0}/{1}".format(cls.bays_uri(), bay_id) - - def list_bays(self, filters=None, **kwargs): - """Makes GET /bays request and returns BayCollection - - Abstracts REST call to return all bays - - :param filters: Optional k:v dict that's converted to url query - :returns: response object and BayCollection object - """ - - resp, body = self.get(self.bays_uri(filters), **kwargs) - return self.deserialize(resp, body, bay_model.BayCollection) - - def get_bay(self, bay_id, **kwargs): - """Makes GET /bay request and returns BayEntity - - Abstracts REST call to return a single bay based on uuid or name - - :param bay_id: bay uuid or name - :returns: response object and BayCollection object - """ - - resp, body = self.get(self.bay_uri(bay_id)) - return self.deserialize(resp, body, bay_model.BayEntity) - - def post_bay(self, model, **kwargs): - """Makes POST /bay request and returns BayEntity - - Abstracts REST call to create new bay - - :param model: BayEntity - :returns: response object and BayEntity object - """ - - resp, body = self.post( - self.bays_uri(), - body=model.to_json(), **kwargs) - return self.deserialize(resp, body, bay_model.BayEntity) - - def patch_bay(self, bay_id, baypatch_listmodel, **kwargs): - """Makes PATCH /bay request and returns BayEntity - - Abstracts REST call to update bay attributes - - :param bay_id: UUID of bay - :param baypatch_listmodel: BayPatchCollection - :returns: response object and BayEntity object - """ - - resp, body = self.patch( - self.bay_uri(bay_id), - body=baypatch_listmodel.to_json(), **kwargs) - return self.deserialize(resp, body, bay_model.BayEntity) - - def delete_bay(self, bay_id, **kwargs): - """Makes DELETE /bay request and returns response object - - Abstracts REST call to delete bay based on uuid or name - - :param bay_id: UUID or name of bay - :returns: response object - """ - - return self.delete(self.bay_uri(bay_id), **kwargs) - - def wait_for_bay_to_delete(self, bay_id): - utils.wait_for_condition( - lambda: self.does_bay_not_exist(bay_id), 10, 600) - - def wait_for_created_bay(self, bay_id, delete_on_error=True): - try: - utils.wait_for_condition( - lambda: self.does_bay_exist(bay_id), 10, 1800) - except Exception: - # In error state. 
Clean up the bay id if desired - self.LOG.error('Bay %s entered an exception state.', bay_id) - if delete_on_error: - self.LOG.error('We will attempt to delete bays now.') - self.delete_bay(bay_id) - self.wait_for_bay_to_delete(bay_id) - raise - - def wait_for_final_state(self, bay_id): - utils.wait_for_condition( - lambda: self.is_bay_in_final_state(bay_id), 10, 1800) - - def is_bay_in_final_state(self, bay_id): - try: - resp, model = self.get_bay(bay_id) - if model.status in ['CREATED', 'CREATE_COMPLETE', - 'ERROR', 'CREATE_FAILED']: - self.LOG.info('Bay %s succeeded.', bay_id) - return True - else: - return False - except exceptions.NotFound: - self.LOG.warning('Bay %s is not found.', bay_id) - return False - - def does_bay_exist(self, bay_id): - try: - resp, model = self.get_bay(bay_id) - if model.status in ['CREATED', 'CREATE_COMPLETE']: - self.LOG.info('Bay %s is created.', bay_id) - return True - elif model.status in ['ERROR', 'CREATE_FAILED']: - self.LOG.error('Bay %s is in fail state.', bay_id) - raise exceptions.ServerFault( - "Got into an error condition: %s for %s", - (model.status, bay_id)) - else: - return False - except exceptions.NotFound: - self.LOG.warning('Bay %s is not found.', bay_id) - return False - - def does_bay_not_exist(self, bay_id): - try: - self.get_bay(bay_id) - except exceptions.NotFound: - self.LOG.warning('Bay %s is not found.', bay_id) - return True - return False diff --git a/magnum/tests/functional/api/v1/clients/baymodel_client.py b/magnum/tests/functional/api/v1/clients/baymodel_client.py deleted file mode 100644 index 10d6a8b4..00000000 --- a/magnum/tests/functional/api/v1/clients/baymodel_client.py +++ /dev/null @@ -1,105 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
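-
-# Illustrative CRUD sequence (sketch only; `client` is a BayModelClient from
-# the functional-test managers and `baymodel_model` a prepared entity):
-#
-#   resp, model = client.post_baymodel(baymodel_model)
-#   resp, model = client.get_baymodel(model.uuid)
-#   resp, _ = client.delete_baymodel(model.uuid)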
- -from magnum.tests.functional.api.v1.models import baymodel_model -from magnum.tests.functional.common import client - - -class BayModelClient(client.MagnumClient): - """Encapsulates REST calls and maps JSON to/from models""" - - @classmethod - def baymodels_uri(cls, filters=None): - """Construct baymodels uri with optional filters - - :param filters: Optional k:v dict that's converted to url query - :returns: url string - """ - - url = "/baymodels" - if filters: - url = cls.add_filters(url, filters) - return url - - @classmethod - def baymodel_uri(cls, baymodel_id): - """Construct baymodel uri - - :param baymodel_id: baymodel uuid or name - :returns: url string - """ - - return "{0}/{1}".format(cls.baymodels_uri(), baymodel_id) - - def list_baymodels(self, filters=None, **kwargs): - """Makes GET /baymodels request and returns BayModelCollection - - Abstracts REST call to return all baymodels - - :param filters: Optional k:v dict that's converted to url query - :returns: response object and BayModelCollection object - """ - - resp, body = self.get(self.baymodels_uri(filters), **kwargs) - return self.deserialize(resp, body, baymodel_model.BayModelCollection) - - def get_baymodel(self, baymodel_id, **kwargs): - """Makes GET /baymodel request and returns BayModelEntity - - Abstracts REST call to return a single baymodel based on uuid or name - - :param baymodel_id: baymodel uuid or name - :returns: response object and BayModelCollection object - """ - - resp, body = self.get(self.baymodel_uri(baymodel_id)) - return self.deserialize(resp, body, baymodel_model.BayModelEntity) - - def post_baymodel(self, model, **kwargs): - """Makes POST /baymodel request and returns BayModelEntity - - Abstracts REST call to create new baymodel - - :param model: BayModelEntity - :returns: response object and BayModelEntity object - """ - - resp, body = self.post( - self.baymodels_uri(), - body=model.to_json(), **kwargs) - return self.deserialize(resp, body, baymodel_model.BayModelEntity) - - def patch_baymodel(self, baymodel_id, baymodelpatch_listmodel, **kwargs): - """Makes PATCH /baymodel request and returns BayModelEntity - - Abstracts REST call to update baymodel attributes - - :param baymodel_id: UUID of baymodel - :param baymodelpatch_listmodel: BayModelPatchCollection - :returns: response object and BayModelEntity object - """ - - resp, body = self.patch( - self.baymodel_uri(baymodel_id), - body=baymodelpatch_listmodel.to_json(), **kwargs) - return self.deserialize(resp, body, baymodel_model.BayModelEntity) - - def delete_baymodel(self, baymodel_id, **kwargs): - """Makes DELETE /baymodel request and returns response object - - Abstracts REST call to delete baymodel based on uuid or name - - :param baymodel_id: UUID or name of baymodel - :returns: response object - """ - - return self.delete(self.baymodel_uri(baymodel_id), **kwargs) diff --git a/magnum/tests/functional/api/v1/clients/cert_client.py b/magnum/tests/functional/api/v1/clients/cert_client.py deleted file mode 100644 index 6853f359..00000000 --- a/magnum/tests/functional/api/v1/clients/cert_client.py +++ /dev/null @@ -1,56 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional.api.v1.models import cert_model -from magnum.tests.functional.common import client - - -class CertClient(client.MagnumClient): - """Encapsulates REST calls and maps JSON to/from models""" - - url = "/certificates" - - @classmethod - def cert_uri(cls, cluster_id): - """Construct cluster uri - - :param cluster_id: cluster uuid or name - :returns: url string - """ - - return "{0}/{1}".format(cls.url, cluster_id) - - def get_cert(self, cluster_id, **kwargs): - """Makes GET /certificates/cluster_id request and returns CertEntity - - Abstracts REST call to return a single cert based on uuid or name - - :param cluster_id: cluster uuid or name - :returns: response object and ClusterCollection object - """ - - resp, body = self.get(self.cert_uri(cluster_id), **kwargs) - return self.deserialize(resp, body, cert_model.CertEntity) - - def post_cert(self, model, **kwargs): - """Makes POST /certificates request and returns CertEntity - - Abstracts REST call to sign new certificate - - :param model: CertEntity - :returns: response object and CertEntity object - """ - - resp, body = self.post( - CertClient.url, - body=model.to_json(), **kwargs) - return self.deserialize(resp, body, cert_model.CertEntity) diff --git a/magnum/tests/functional/api/v1/clients/cluster_client.py b/magnum/tests/functional/api/v1/clients/cluster_client.py deleted file mode 100755 index 21e4be9c..00000000 --- a/magnum/tests/functional/api/v1/clients/cluster_client.py +++ /dev/null @@ -1,173 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
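-
-# Illustrative lifecycle (sketch mirroring the bay client above; `client` is
-# a ClusterClient from the functional-test managers):
-#
-#   resp, cluster_id = client.post_cluster(cluster_model)  # ClusterIdEntity
-#   client.wait_for_created_cluster(cluster_id.uuid)
-#   resp, cluster = client.get_cluster(cluster_id.uuid)    # ClusterEntity
-#   client.delete_cluster(cluster_id.uuid)
-#   client.wait_for_cluster_to_delete(cluster_id.uuid)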
- -from oslo_log import log as logging -from tempest.lib import exceptions - -from magnum.tests.functional.api.v1.models import cluster_id_model -from magnum.tests.functional.api.v1.models import cluster_model -from magnum.tests.functional.common import client -from magnum.tests.functional.common import utils - - -class ClusterClient(client.MagnumClient): - """Encapsulates REST calls and maps JSON to/from models""" - - LOG = logging.getLogger(__name__) - - @classmethod - def clusters_uri(cls, filters=None): - """Construct clusters uri with optional filters - - :param filters: Optional k:v dict that's converted to url query - :returns: url string - """ - - url = "/clusters" - if filters: - url = cls.add_filters(url, filters) - return url - - @classmethod - def cluster_uri(cls, cluster_id): - """Construct cluster uri - - :param cluster_id: cluster uuid or name - :returns: url string - """ - - return "{0}/{1}".format(cls.clusters_uri(), cluster_id) - - def list_clusters(self, filters=None, **kwargs): - """Makes GET /clusters request and returns ClusterCollection - - Abstracts REST call to return all clusters - - :param filters: Optional k:v dict that's converted to url query - :returns: response object and ClusterCollection object - """ - - resp, body = self.get(self.clusters_uri(filters), **kwargs) - return self.deserialize(resp, body, cluster_model.ClusterCollection) - - def get_cluster(self, cluster_id, **kwargs): - """Makes GET /cluster request and returns ClusterEntity - - Abstracts REST call to return a single cluster based on uuid or name - - :param cluster_id: cluster uuid or name - :returns: response object and ClusterCollection object - """ - - resp, body = self.get(self.cluster_uri(cluster_id)) - return self.deserialize(resp, body, cluster_model.ClusterEntity) - - def post_cluster(self, model, **kwargs): - """Makes POST /cluster request and returns ClusterIdEntity - - Abstracts REST call to create new cluster - - :param model: ClusterEntity - :returns: response object and ClusterIdEntity object - """ - - resp, body = self.post( - self.clusters_uri(), - body=model.to_json(), **kwargs) - return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity) - - def patch_cluster(self, cluster_id, clusterpatch_listmodel, **kwargs): - """Makes PATCH /cluster request and returns ClusterIdEntity - - Abstracts REST call to update cluster attributes - - :param cluster_id: UUID of cluster - :param clusterpatch_listmodel: ClusterPatchCollection - :returns: response object and ClusterIdEntity object - """ - - resp, body = self.patch( - self.cluster_uri(cluster_id), - body=clusterpatch_listmodel.to_json(), **kwargs) - return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity) - - def delete_cluster(self, cluster_id, **kwargs): - """Makes DELETE /cluster request and returns response object - - Abstracts REST call to delete cluster based on uuid or name - - :param cluster_id: UUID or name of cluster - :returns: response object - """ - - return self.delete(self.cluster_uri(cluster_id), **kwargs) - - def wait_for_cluster_to_delete(self, cluster_id): - utils.wait_for_condition( - lambda: self.does_cluster_not_exist(cluster_id), 10, 600) - - def wait_for_created_cluster(self, cluster_id, delete_on_error=True): - try: - utils.wait_for_condition( - lambda: self.does_cluster_exist(cluster_id), 10, 1800) - except Exception: - # In error state. 
Clean up the cluster id if desired - self.LOG.error('Cluster %s entered an exception state.', - cluster_id) - if delete_on_error: - self.LOG.error('We will attempt to delete clusters now.') - self.delete_cluster(cluster_id) - self.wait_for_cluster_to_delete(cluster_id) - raise - - def wait_for_final_state(self, cluster_id): - utils.wait_for_condition( - lambda: self.is_cluster_in_final_state(cluster_id), 10, 1800) - - def is_cluster_in_final_state(self, cluster_id): - try: - resp, model = self.get_cluster(cluster_id) - if model.status in ['CREATED', 'CREATE_COMPLETE', - 'ERROR', 'CREATE_FAILED']: - self.LOG.info('Cluster %s succeeded.', cluster_id) - return True - else: - return False - except exceptions.NotFound: - self.LOG.warning('Cluster %s is not found.', cluster_id) - return False - - def does_cluster_exist(self, cluster_id): - try: - resp, model = self.get_cluster(cluster_id) - if model.status in ['CREATED', 'CREATE_COMPLETE']: - self.LOG.info('Cluster %s is created.', cluster_id) - return True - elif model.status in ['ERROR', 'CREATE_FAILED']: - self.LOG.error('Cluster %s is in fail state.', - cluster_id) - raise exceptions.ServerFault( - "Got into an error condition: %s for %s", - (model.status, cluster_id)) - else: - return False - except exceptions.NotFound: - self.LOG.warning('Cluster %s is not found.', cluster_id) - return False - - def does_cluster_not_exist(self, cluster_id): - try: - self.get_cluster(cluster_id) - except exceptions.NotFound: - self.LOG.warning('Cluster %s is not found.', cluster_id) - return True - return False diff --git a/magnum/tests/functional/api/v1/clients/cluster_template_client.py b/magnum/tests/functional/api/v1/clients/cluster_template_client.py deleted file mode 100644 index e3b8e17c..00000000 --- a/magnum/tests/functional/api/v1/clients/cluster_template_client.py +++ /dev/null @@ -1,113 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
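-
-# Illustrative usage (sketch; `client` is a ClusterTemplateClient from the
-# functional-test managers, and the filter dict is a hypothetical example):
-#
-#   resp, ct = client.post_cluster_template(ct_model)
-#   resp, collection = client.list_cluster_templates(filters={'limit': 2})
-#   resp, _ = client.delete_cluster_template(ct.uuid)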
-
-from magnum.tests.functional.api.v1.models import cluster_template_model
-from magnum.tests.functional.common import client
-
-
-class ClusterTemplateClient(client.MagnumClient):
-    """Encapsulates REST calls and maps JSON to/from models"""
-
-    @classmethod
-    def cluster_templates_uri(cls, filters=None):
-        """Construct clustertemplates uri with optional filters
-
-        :param filters: Optional k:v dict that's converted to url query
-        :returns: url string
-        """
-
-        url = "/clustertemplates"
-        if filters:
-            url = cls.add_filters(url, filters)
-        return url
-
-    @classmethod
-    def cluster_template_uri(cls, cluster_template_id):
-        """Construct cluster_template uri
-
-        :param cluster_template_id: cluster_template uuid or name
-        :returns: url string
-        """
-
-        return "{0}/{1}".format(cls.cluster_templates_uri(),
-                                cluster_template_id)
-
-    def list_cluster_templates(self, filters=None, **kwargs):
-        """Makes GET /clustertemplates request
-
-        Abstracts REST call to return all clustertemplates
-
-        :param filters: Optional k:v dict that's converted to url query
-        :returns: response object and ClusterTemplateCollection object
-        """
-
-        resp, body = self.get(self.cluster_templates_uri(filters), **kwargs)
-        collection = cluster_template_model.ClusterTemplateCollection
-        return self.deserialize(resp, body, collection)
-
-    def get_cluster_template(self, cluster_template_id, **kwargs):
-        """Makes GET /clustertemplate request and returns ClusterTemplateEntity
-
-        Abstracts REST call to return a single clustertemplate based on uuid
-        or name
-
-        :param cluster_template_id: clustertemplate uuid or name
-        :returns: response object and ClusterTemplateEntity object
-        """
-
-        resp, body = self.get(self.cluster_template_uri(cluster_template_id))
-        return self.deserialize(resp, body,
-                                cluster_template_model.ClusterTemplateEntity)
-
-    def post_cluster_template(self, model, **kwargs):
-        """Makes POST /clustertemplate request
-
-        Abstracts REST call to create new clustertemplate
-
-        :param model: ClusterTemplateEntity
-        :returns: response object and ClusterTemplateEntity object
-        """
-
-        resp, body = self.post(
-            self.cluster_templates_uri(),
-            body=model.to_json(), **kwargs)
-        entity = cluster_template_model.ClusterTemplateEntity
-        return self.deserialize(resp, body, entity)
-
-    def patch_cluster_template(self, cluster_template_id,
-                               cluster_templatepatch_listmodel, **kwargs):
-        """Makes PATCH /clustertemplate and returns ClusterTemplateEntity
-
-        Abstracts REST call to update clustertemplate attributes
-
-        :param cluster_template_id: UUID of clustertemplate
-        :param cluster_templatepatch_listmodel: ClusterTemplatePatchCollection
-        :returns: response object and ClusterTemplateEntity object
-        """
-
-        resp, body = self.patch(
-            self.cluster_template_uri(cluster_template_id),
-            body=cluster_templatepatch_listmodel.to_json(), **kwargs)
-        return self.deserialize(resp, body,
-                                cluster_template_model.ClusterTemplateEntity)
-
-    def delete_cluster_template(self, cluster_template_id, **kwargs):
-        """Makes DELETE /clustertemplate request and returns response object
-
-        Abstracts REST call to delete clustertemplate based on uuid or name
-
-        :param cluster_template_id: UUID or name of clustertemplate
-        :returns: response object
-        """
-
-        return self.delete(self.cluster_template_uri(cluster_template_id),
-                           **kwargs)
diff --git a/magnum/tests/functional/api/v1/clients/magnum_service_client.py b/magnum/tests/functional/api/v1/clients/magnum_service_client.py
deleted file mode 100644
index 0559c913..00000000
---
a/magnum/tests/functional/api/v1/clients/magnum_service_client.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional.api.v1.models import magnum_service_model -from magnum.tests.functional.common import client - - -class MagnumServiceClient(client.MagnumClient): - """Encapsulates REST calls and maps JSON to/from models""" - - @classmethod - def magnum_service_uri(cls, filters=None): - """Construct magnum services uri with optional filters - - :param filters: Optional k:v dict that's converted to url query - :returns: url string - """ - - url = "/mservices" - if filters: - url = cls.add_filters(url, filters) - return url - - def magnum_service_list(self, filters=None, **kwargs): - """Makes GET /mservices request and returns MagnumServiceCollection - - Abstracts REST call to return all magnum services. - - :param filters: Optional k:v dict that's converted to url query - :returns: response object and MagnumServiceCollection object - """ - - resp, body = self.get(self.magnum_service_uri(filters), **kwargs) - return self.deserialize(resp, body, - magnum_service_model.MagnumServiceCollection) diff --git a/magnum/tests/functional/api/v1/models/__init__.py b/magnum/tests/functional/api/v1/models/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/functional/api/v1/models/bay_model.py b/magnum/tests/functional/api/v1/models/bay_model.py deleted file mode 100644 index 4c89acf3..00000000 --- a/magnum/tests/functional/api/v1/models/bay_model.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional.common import models - - -class BayData(models.BaseModel): - """Data that encapsulates bay attributes""" - pass - - -class BayEntity(models.EntityModel): - """Entity Model that represents a single instance of BayData""" - ENTITY_NAME = 'bay' - MODEL_TYPE = BayData - - -class BayCollection(models.CollectionModel): - """Collection Model that represents a list of BayData objects""" - COLLECTION_NAME = 'baylists' - MODEL_TYPE = BayData diff --git a/magnum/tests/functional/api/v1/models/baymodel_model.py b/magnum/tests/functional/api/v1/models/baymodel_model.py deleted file mode 100644 index 606fb04f..00000000 --- a/magnum/tests/functional/api/v1/models/baymodel_model.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional.common import models - - -class BayModelData(models.BaseModel): - """Data that encapsulates baymodel attributes""" - pass - - -class BayModelEntity(models.EntityModel): - """Entity Model that represents a single instance of BayModelData""" - ENTITY_NAME = 'baymodel' - MODEL_TYPE = BayModelData - - -class BayModelCollection(models.CollectionModel): - """Collection Model that represents a list of BayModelData objects""" - COLLECTION_NAME = 'baymodellists' - MODEL_TYPE = BayModelData diff --git a/magnum/tests/functional/api/v1/models/baymodelpatch_model.py b/magnum/tests/functional/api/v1/models/baymodelpatch_model.py deleted file mode 100644 index 74fcbfd4..00000000 --- a/magnum/tests/functional/api/v1/models/baymodelpatch_model.py +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from magnum.tests.functional.common import models - - -class BayModelPatchData(models.BaseModel): - """Data that encapsulates baymodelpatch attributes""" - pass - - -class BayModelPatchEntity(models.EntityModel): - """Entity Model that represents a single instance of BayModelPatchData""" - ENTITY_NAME = 'baymodelpatch' - MODEL_TYPE = BayModelPatchData - - -class BayModelPatchCollection(models.CollectionModel): - """Collection Model that represents a list of BayModelPatchData objects""" - MODEL_TYPE = BayModelPatchData - COLLECTION_NAME = 'baymodelpatchlist' - - def to_json(self): - """Converts BayModelPatchCollection to json - - Retrieves list from COLLECTION_NAME attribute and converts each object - to dict, appending it to a list. 
Then converts the entire list to json - - This is required due to COLLECTION_NAME holding a list of objects that - needed to be converted to dict individually - - :returns: json object - """ - - data = getattr(self, BayModelPatchCollection.COLLECTION_NAME) - collection = [] - for d in data: - collection.append(d.to_dict()) - return json.dumps(collection) - - @classmethod - def from_dict(cls, data): - """Converts dict to BayModelPatchData - - Converts data dict to list of BayModelPatchData objects and stores it - in COLLECTION_NAME - - Example of dict data: - - [{ - "path": "/name", - "value": "myname", - "op": "replace" - }] - - :param data: dict of patch data - :returns: json object - """ - - model = cls() - collection = [] - for d in data: - collection.append(cls.MODEL_TYPE.from_dict(d)) - setattr(model, cls.COLLECTION_NAME, collection) - return model diff --git a/magnum/tests/functional/api/v1/models/baypatch_model.py b/magnum/tests/functional/api/v1/models/baypatch_model.py deleted file mode 100644 index 4a0f1374..00000000 --- a/magnum/tests/functional/api/v1/models/baypatch_model.py +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from magnum.tests.functional.common import models - - -class BayPatchData(models.BaseModel): - """Data that encapsulates baypatch attributes""" - pass - - -class BayPatchEntity(models.EntityModel): - """Entity Model that represents a single instance of BayPatchData""" - ENTITY_NAME = 'baypatch' - MODEL_TYPE = BayPatchData - - -class BayPatchCollection(models.CollectionModel): - """Collection Model that represents a list of BayPatchData objects""" - MODEL_TYPE = BayPatchData - COLLECTION_NAME = 'baypatchlist' - - def to_json(self): - """Converts BayPatchCollection to json - - Retrieves list from COLLECTION_NAME attribute and converts each object - to dict, appending it to a list. 
Then converts the entire list to json - - This is required due to COLLECTION_NAME holding a list of objects that - needed to be converted to dict individually - - :returns: json object - """ - - data = getattr(self, BayPatchCollection.COLLECTION_NAME) - collection = [] - for d in data: - collection.append(d.to_dict()) - return json.dumps(collection) - - @classmethod - def from_dict(cls, data): - """Converts dict to BayPatchData - - Converts data dict to list of BayPatchData objects and stores it - in COLLECTION_NAME - - Example of dict data: - - [{ - "path": "/name", - "value": "myname", - "op": "replace" - }] - - :param data: dict of patch data - :returns: json object - """ - - model = cls() - collection = [] - for d in data: - collection.append(cls.MODEL_TYPE.from_dict(d)) - setattr(model, cls.COLLECTION_NAME, collection) - return model diff --git a/magnum/tests/functional/api/v1/models/cert_model.py b/magnum/tests/functional/api/v1/models/cert_model.py deleted file mode 100644 index 4948a392..00000000 --- a/magnum/tests/functional/api/v1/models/cert_model.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional.common import models - - -class CertData(models.BaseModel): - """Data that encapsulates cert attributes""" - pass - - -class CertEntity(models.EntityModel): - """Entity Model that represents a single instance of CertData""" - ENTITY_NAME = 'certificate' - MODEL_TYPE = CertData diff --git a/magnum/tests/functional/api/v1/models/cluster_id_model.py b/magnum/tests/functional/api/v1/models/cluster_id_model.py deleted file mode 100644 index d103f971..00000000 --- a/magnum/tests/functional/api/v1/models/cluster_id_model.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional.common import models - - -class ClusterIdData(models.BaseModel): - """Data that encapsulates ClusterId attributes""" - pass - - -class ClusterIdEntity(models.EntityModel): - """Entity Model that represents a single instance of CertData""" - ENTITY_NAME = 'clusterid' - MODEL_TYPE = ClusterIdData diff --git a/magnum/tests/functional/api/v1/models/cluster_model.py b/magnum/tests/functional/api/v1/models/cluster_model.py deleted file mode 100644 index af80c946..00000000 --- a/magnum/tests/functional/api/v1/models/cluster_model.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional.common import models - - -class ClusterData(models.BaseModel): - """Data that encapsulates cluster attributes""" - pass - - -class ClusterEntity(models.EntityModel): - """Entity Model that represents a single instance of ClusterData""" - ENTITY_NAME = 'cluster' - MODEL_TYPE = ClusterData - - -class ClusterCollection(models.CollectionModel): - """Collection Model that represents a list of ClusterData objects""" - COLLECTION_NAME = 'clusterlists' - MODEL_TYPE = ClusterData diff --git a/magnum/tests/functional/api/v1/models/cluster_template_model.py b/magnum/tests/functional/api/v1/models/cluster_template_model.py deleted file mode 100644 index 4471f1c4..00000000 --- a/magnum/tests/functional/api/v1/models/cluster_template_model.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional.common import models - - -class ClusterTemplateData(models.BaseModel): - """Data that encapsulates clustertemplate attributes""" - pass - - -class ClusterTemplateEntity(models.EntityModel): - """Entity Model that represents a single instance of ClusterTemplateData""" - ENTITY_NAME = 'clustertemplate' - MODEL_TYPE = ClusterTemplateData - - -class ClusterTemplateCollection(models.CollectionModel): - """Collection that represents a list of ClusterTemplateData objects""" - COLLECTION_NAME = 'clustertemplatelists' - MODEL_TYPE = ClusterTemplateData diff --git a/magnum/tests/functional/api/v1/models/cluster_templatepatch_model.py b/magnum/tests/functional/api/v1/models/cluster_templatepatch_model.py deleted file mode 100644 index 83a6b674..00000000 --- a/magnum/tests/functional/api/v1/models/cluster_templatepatch_model.py +++ /dev/null @@ -1,77 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
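-
-# Illustrative round-trip based on the from_dict/to_json docstrings below:
-#
-#   patch = ClusterTemplatePatchCollection.from_dict(
-#       [{"path": "/name", "value": "myname", "op": "replace"}])
-#   body = patch.to_json()  # -> '[{"path": "/name", ...}]'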
- -import json - -from magnum.tests.functional.common import models - - -class ClusterTemplatePatchData(models.BaseModel): - """Data that encapsulates clustertemplatepatch attributes""" - pass - - -class ClusterTemplatePatchEntity(models.EntityModel): - """Model that represents a single instance of ClusterTemplatePatchData""" - ENTITY_NAME = 'clustertemplatepatch' - MODEL_TYPE = ClusterTemplatePatchData - - -class ClusterTemplatePatchCollection(models.CollectionModel): - """Model that represents a list of ClusterTemplatePatchData objects""" - MODEL_TYPE = ClusterTemplatePatchData - COLLECTION_NAME = 'clustertemplatepatchlist' - - def to_json(self): - """Converts ClusterTemplatePatchCollection to json - - Retrieves list from COLLECTION_NAME attribute and converts each object - to dict, appending it to a list. Then converts the entire list to - json - - This is required due to COLLECTION_NAME holding a list of objects that - needed to be converted to dict individually - - :returns: json object - """ - - data = getattr(self, ClusterTemplatePatchCollection.COLLECTION_NAME) - collection = [] - for d in data: - collection.append(d.to_dict()) - return json.dumps(collection) - - @classmethod - def from_dict(cls, data): - """Converts dict to ClusterTemplatePatchData - - Converts data dict to list of ClusterTemplatePatchData objects and - stores it in COLLECTION_NAME - - Example of dict data: - - [{ - "path": "/name", - "value": "myname", - "op": "replace" - }] - - :param data: dict of patch data - :returns: json object - """ - - model = cls() - collection = [] - for d in data: - collection.append(cls.MODEL_TYPE.from_dict(d)) - setattr(model, cls.COLLECTION_NAME, collection) - return model diff --git a/magnum/tests/functional/api/v1/models/clusterpatch_model.py b/magnum/tests/functional/api/v1/models/clusterpatch_model.py deleted file mode 100644 index 6e93f377..00000000 --- a/magnum/tests/functional/api/v1/models/clusterpatch_model.py +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from magnum.tests.functional.common import models - - -class ClusterPatchData(models.BaseModel): - """Data that encapsulates clusterpatch attributes""" - pass - - -class ClusterPatchEntity(models.EntityModel): - """Entity Model that represents a single instance of ClusterPatchData""" - ENTITY_NAME = 'clusterpatch' - MODEL_TYPE = ClusterPatchData - - -class ClusterPatchCollection(models.CollectionModel): - """Collection Model that represents a list of ClusterPatchData objects""" - MODEL_TYPE = ClusterPatchData - COLLECTION_NAME = 'clusterpatchlist' - - def to_json(self): - """Converts ClusterPatchCollection to json - - Retrieves list from COLLECTION_NAME attribute and converts each object - to dict, appending it to a list. 
Then converts the entire list to json - - This is required due to COLLECTION_NAME holding a list of objects that - needed to be converted to dict individually - - :returns: json object - """ - - data = getattr(self, ClusterPatchCollection.COLLECTION_NAME) - collection = [] - for d in data: - collection.append(d.to_dict()) - return json.dumps(collection) - - @classmethod - def from_dict(cls, data): - """Converts dict to ClusterPatchData - - Converts data dict to list of ClusterPatchData objects and stores it - in COLLECTION_NAME - - Example of dict data: - - [{ - "path": "/name", - "value": "myname", - "op": "replace" - }] - - :param data: dict of patch data - :returns: json object - """ - - model = cls() - collection = [] - for d in data: - collection.append(cls.MODEL_TYPE.from_dict(d)) - setattr(model, cls.COLLECTION_NAME, collection) - return model diff --git a/magnum/tests/functional/api/v1/models/magnum_service_model.py b/magnum/tests/functional/api/v1/models/magnum_service_model.py deleted file mode 100644 index 94a23196..00000000 --- a/magnum/tests/functional/api/v1/models/magnum_service_model.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional.common import models - - -class MagnumServiceData(models.BaseModel): - """Data that encapsulates magnum_service attributes""" - pass - - -class MagnumServiceEntity(models.EntityModel): - """Entity Model that represents a single instance of MagnumServiceData""" - ENTITY_NAME = 'mservice' - MODEL_TYPE = MagnumServiceData - - -class MagnumServiceCollection(models.CollectionModel): - """Collection Model that represents a list of MagnumServiceData objects""" - COLLECTION_NAME = 'mservicelists' - MODEL_TYPE = MagnumServiceData diff --git a/magnum/tests/functional/api/v1/test_bay.py b/magnum/tests/functional/api/v1/test_bay.py deleted file mode 100755 index c8f55865..00000000 --- a/magnum/tests/functional/api/v1/test_bay.py +++ /dev/null @@ -1,216 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import fixtures - -from oslo_log import log as logging -from oslo_utils import uuidutils -from tempest.lib.common.utils import data_utils -from tempest.lib import exceptions -import testtools - -from magnum.tests.functional.api import base -from magnum.tests.functional.common import config -from magnum.tests.functional.common import datagen - - -class BayTest(base.BaseTempestTest): - - """Tests for bay CRUD.""" - - LOG = logging.getLogger(__name__) - - def __init__(self, *args, **kwargs): - super(BayTest, self).__init__(*args, **kwargs) - self.bays = [] - self.creds = None - self.keypair = None - self.baymodel = None - self.baymodel_client = None - self.keypairs_client = None - self.bay_client = None - self.cert_client = None - - def setUp(self): - try: - super(BayTest, self).setUp() - (self.creds, self.keypair) = self.get_credentials_with_keypair( - type_of_creds='default') - (self.baymodel_client, - self.keypairs_client) = self.get_clients_with_existing_creds( - creds=self.creds, - type_of_creds='default', - request_type='baymodel') - (self.bay_client, _) = self.get_clients_with_existing_creds( - creds=self.creds, - type_of_creds='default', - request_type='bay') - (self.cert_client, _) = self.get_clients_with_existing_creds( - creds=self.creds, - type_of_creds='default', - request_type='cert') - model = datagen.valid_swarm_baymodel() - _, self.baymodel = self._create_baymodel(model) - - # NOTE (dimtruck) by default tempest sets timeout to 20 mins. - # We need more time. - test_timeout = 1800 - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - except Exception: - self.tearDown() - raise - - def tearDown(self): - try: - bay_list = self.bays[:] - for bay_id in bay_list: - self._delete_bay(bay_id) - self.bays.remove(bay_id) - if self.baymodel: - self._delete_baymodel(self.baymodel.uuid) - finally: - super(BayTest, self).tearDown() - - def _create_baymodel(self, baymodel_model): - self.LOG.debug('We will create a baymodel for %s', baymodel_model) - resp, model = self.baymodel_client.post_baymodel(baymodel_model) - return resp, model - - def _delete_baymodel(self, baymodel_id): - self.LOG.debug('We will delete a baymodel for %s', baymodel_id) - resp, model = self.baymodel_client.delete_baymodel(baymodel_id) - return resp, model - - def _create_bay(self, bay_model, is_async=False): - self.LOG.debug('We will create bay for %s', bay_model) - headers = {'Content-Type': 'application/json', - 'Accept': 'application/json'} - if is_async: - headers["OpenStack-API-Version"] = "container-infra 1.2" - resp, model = self.bay_client.post_bay(bay_model, headers=headers) - self.LOG.debug('Response: %s', resp) - if is_async: - self.assertEqual(202, resp.status) - else: - self.assertEqual(201, resp.status) - self.assertIsNotNone(model.uuid) - self.assertTrue(uuidutils.is_uuid_like(model.uuid)) - self.bays.append(model.uuid) - self.bay_uuid = model.uuid - if config.Config.copy_logs: - self.addCleanup(self.copy_logs_handler( - lambda: list( - [self._get_bay_by_id(self.bay_uuid)[1].master_addresses, - self._get_bay_by_id(self.bay_uuid)[1].node_addresses]), - self.baymodel.coe, - self.keypair)) - self.bay_client.wait_for_created_bay(model.uuid, delete_on_error=False) - return resp, model - - def _delete_bay(self, bay_id): - self.LOG.debug('We will delete a bay for %s', bay_id) - resp, model = self.bay_client.delete_bay(bay_id) - self.assertEqual(204, resp.status) - self.bay_client.wait_for_bay_to_delete(bay_id) - self.assertRaises( - exceptions.NotFound, - self.cert_client.get_cert, bay_id) - 
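        # Deleting a bay must also revoke its CA certificate, which is why the get_cert call above is expected to raise NotFound.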
return resp, model - - def _get_bay_by_id(self, bay_id): - resp, model = self.bay_client.get_bay(bay_id) - return resp, model - - @testtools.testcase.attr('negative') - def test_create_bay_for_nonexisting_baymodel(self): - gen_model = datagen.valid_bay_data(baymodel_id='this-does-not-exist') - self.assertRaises( - exceptions.BadRequest, - self.bay_client.post_bay, gen_model) - - @testtools.testcase.attr('negative') - def test_create_bay_with_node_count_0(self): - gen_model = datagen.valid_bay_data( - baymodel_id=self.baymodel.uuid, node_count=0) - self.assertRaises( - exceptions.BadRequest, - self.bay_client.post_bay, gen_model) - - @testtools.testcase.attr('negative') - def test_create_bay_with_zero_masters(self): - gen_model = datagen.valid_bay_data(baymodel_id=self.baymodel.uuid, - master_count=0) - self.assertRaises( - exceptions.BadRequest, - self.bay_client.post_bay, gen_model) - - @testtools.testcase.attr('negative') - def test_create_bay_with_nonexisting_flavor(self): - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - resp, baymodel = self._create_baymodel(gen_model) - self.assertEqual(201, resp.status) - self.assertIsNotNone(baymodel.uuid) - - gen_model = datagen.valid_bay_data(baymodel_id=baymodel.uuid) - gen_model.flavor_id = 'aaa' - self.assertRaises( - exceptions.BadRequest, - self.bay_client.post_bay, gen_model) - - resp, _ = self._delete_baymodel(baymodel.uuid) - self.assertEqual(204, resp.status) - - @testtools.testcase.attr('negative') - def test_create_bay_with_nonexisting_keypair(self): - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - resp, baymodel = self._create_baymodel(gen_model) - self.assertEqual(201, resp.status) - self.assertIsNotNone(baymodel.uuid) - - gen_model = datagen.valid_bay_data(baymodel_id=baymodel.uuid) - gen_model.keypair_id = 'aaa' - self.assertRaises( - exceptions.BadRequest, - self.bay_client.post_bay, gen_model) - - resp, _ = self._delete_baymodel(baymodel.uuid) - self.assertEqual(204, resp.status) - - @testtools.testcase.attr('negative') - def test_create_bay_with_nonexisting_external_network(self): - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - resp, baymodel = self._create_baymodel(gen_model) - self.assertEqual(201, resp.status) - self.assertIsNotNone(baymodel.uuid) - - gen_model = datagen.valid_bay_data(baymodel_id=baymodel.uuid) - gen_model.external_network_id = 'aaa' - self.assertRaises( - exceptions.BadRequest, - self.bay_client.post_bay, gen_model) - - resp, _ = self._delete_baymodel(baymodel.uuid) - self.assertEqual(204, resp.status) - - @testtools.testcase.attr('negative') - def test_update_bay_for_nonexisting_bay(self): - patch_model = datagen.bay_name_patch_data() - - self.assertRaises( - exceptions.NotFound, - self.bay_client.patch_bay, 'fooo', patch_model) - - @testtools.testcase.attr('negative') - def test_delete_bay_for_nonexisting_bay(self): - self.assertRaises( - exceptions.NotFound, - self.bay_client.delete_bay, data_utils.rand_uuid()) diff --git a/magnum/tests/functional/api/v1/test_baymodel.py b/magnum/tests/functional/api/v1/test_baymodel.py deleted file mode 100644 index c95d38b1..00000000 --- a/magnum/tests/functional/api/v1/test_baymodel.py +++ /dev/null @@ -1,207 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from tempest.lib.common.utils import data_utils -from tempest.lib import exceptions -import testtools - -from magnum.tests.functional.api import base -from magnum.tests.functional.common import datagen - - -class BayModelTest(base.BaseTempestTest): - - """Tests for baymodel CRUD.""" - - def __init__(self, *args, **kwargs): - super(BayModelTest, self).__init__(*args, **kwargs) - self.baymodels = [] - self.baymodel_client = None - self.keypairs_client = None - - def setUp(self): - try: - super(BayModelTest, self).setUp() - (self.baymodel_client, - self.keypairs_client) = self.get_clients_with_new_creds( - type_of_creds='default', - request_type='baymodel') - except Exception: - self.tearDown() - raise - - def tearDown(self): - for baymodel_id in self.baymodels: - self._delete_baymodel(baymodel_id) - self.baymodels.remove(baymodel_id) - super(BayModelTest, self).tearDown() - - def _create_baymodel(self, baymodel_model): - resp, model = self.baymodel_client.post_baymodel(baymodel_model) - self.assertEqual(201, resp.status) - self.baymodels.append(model.uuid) - return resp, model - - def _delete_baymodel(self, baymodel_id): - resp, model = self.baymodel_client.delete_baymodel(baymodel_id) - self.assertEqual(204, resp.status) - return resp, model - - @testtools.testcase.attr('positive') - def test_list_baymodels(self): - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - _, temp_model = self._create_baymodel(gen_model) - resp, model = self.baymodel_client.list_baymodels() - self.assertEqual(200, resp.status) - self.assertGreater(len(model.baymodels), 0) - self.assertIn( - temp_model.uuid, list([x['uuid'] for x in model.baymodels])) - - @testtools.testcase.attr('positive') - def test_create_baymodel(self): - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - resp, model = self._create_baymodel(gen_model) - - @testtools.testcase.attr('positive') - def test_create_get_public_baymodel(self): - gen_model = datagen.valid_swarm_baymodel(is_public=True) - self.assertRaises( - exceptions.Forbidden, - self.baymodel_client.post_baymodel, gen_model) - - @testtools.testcase.attr('positive') - def test_update_baymodel_public_by_uuid(self): - path = "/public" - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - resp, old_model = self._create_baymodel(gen_model) - - patch_model = datagen.baymodel_replace_patch_data(path, value=True) - self.assertRaises( - exceptions.Forbidden, - self.baymodel_client.patch_baymodel, old_model.uuid, patch_model) - - @testtools.testcase.attr('positive') - def test_update_baymodel_by_uuid(self): - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - resp, old_model = self._create_baymodel(gen_model) - - path = "/name" - patch_model = datagen.baymodel_replace_patch_data(path) - resp, new_model = self.baymodel_client.patch_baymodel( - old_model.uuid, patch_model) - self.assertEqual(200, resp.status) - - resp, model = self.baymodel_client.get_baymodel(new_model.uuid) - self.assertEqual(200, resp.status) - self.assertEqual(old_model.uuid, new_model.uuid) - self.assertEqual(model.name, new_model.name) 
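The update test above relies on datagen.baymodel_replace_patch_data(), which builds an RFC 6902 style JSON patch. A sketch of the request body it produces for path "/name" (the value shown is illustrative; datagen generates a random name):

    import json

    # A single "replace" operation, as built by
    # baymodel_replace_patch_data("/name").
    patch = [{"path": "/name", "value": "bay-123abc", "op": "replace"}]

    # The client serializes this list and sends it as the PATCH body
    # for the baymodel identified by uuid or name.
    body = json.dumps(patch)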
- - @testtools.testcase.attr('positive') - def test_delete_baymodel_by_uuid(self): - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - resp, model = self._create_baymodel(gen_model) - resp, _ = self.baymodel_client.delete_baymodel(model.uuid) - self.assertEqual(204, resp.status) - self.baymodels.remove(model.uuid) - - @testtools.testcase.attr('positive') - def test_delete_baymodel_by_name(self): - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - resp, model = self._create_baymodel(gen_model) - resp, _ = self.baymodel_client.delete_baymodel(model.name) - self.assertEqual(204, resp.status) - self.baymodels.remove(model.uuid) - - @testtools.testcase.attr('negative') - def test_get_baymodel_by_uuid_404(self): - self.assertRaises( - exceptions.NotFound, - self.baymodel_client.get_baymodel, data_utils.rand_uuid()) - - @testtools.testcase.attr('negative') - def test_update_baymodel_404(self): - path = "/name" - patch_model = datagen.baymodel_replace_patch_data(path) - - self.assertRaises( - exceptions.NotFound, - self.baymodel_client.patch_baymodel, - data_utils.rand_uuid(), patch_model) - - @testtools.testcase.attr('negative') - def test_delete_baymodel_404(self): - self.assertRaises( - exceptions.NotFound, - self.baymodel_client.delete_baymodel, data_utils.rand_uuid()) - - @testtools.testcase.attr('negative') - def test_get_baymodel_by_name_404(self): - self.assertRaises( - exceptions.NotFound, - self.baymodel_client.get_baymodel, 'fooo') - - @testtools.testcase.attr('negative') - def test_update_baymodel_name_not_found(self): - path = "/name" - patch_model = datagen.baymodel_replace_patch_data(path) - - self.assertRaises( - exceptions.NotFound, - self.baymodel_client.patch_baymodel, 'fooo', patch_model) - - @testtools.testcase.attr('negative') - def test_delete_baymodel_by_name_404(self): - self.assertRaises( - exceptions.NotFound, - self.baymodel_client.get_baymodel, 'fooo') - - @testtools.testcase.attr('negative') - def test_create_baymodel_missing_image(self): - gen_model = datagen.baymodel_data_with_missing_image() - self.assertRaises( - exceptions.BadRequest, - self.baymodel_client.post_baymodel, gen_model) - - @testtools.testcase.attr('negative') - def test_create_baymodel_missing_flavor(self): - gen_model = datagen.baymodel_data_with_missing_flavor() - self.assertRaises( - exceptions.BadRequest, - self.baymodel_client.post_baymodel, gen_model) - - @testtools.testcase.attr('negative') - def test_update_baymodel_invalid_patch(self): - # get json object - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - resp, old_model = self._create_baymodel(gen_model) - - self.assertRaises( - exceptions.BadRequest, - self.baymodel_client.patch_baymodel, data_utils.rand_uuid(), - gen_model) - - @testtools.testcase.attr('negative') - def test_create_baymodel_invalid_network_driver(self): - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - gen_model.network_driver = 'invalid_network_driver' - self.assertRaises( - exceptions.BadRequest, - self.baymodel_client.post_baymodel, gen_model) - - @testtools.testcase.attr('negative') - def test_create_baymodel_invalid_volume_driver(self): - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - gen_model.volume_driver = 'invalid_volume_driver' - self.assertRaises( - exceptions.BadRequest, - self.baymodel_client.post_baymodel, gen_model) diff --git a/magnum/tests/functional/api/v1/test_baymodel_admin.py b/magnum/tests/functional/api/v1/test_baymodel_admin.py deleted file 
mode 100644 index 9f8642ee..00000000 --- a/magnum/tests/functional/api/v1/test_baymodel_admin.py +++ /dev/null @@ -1,80 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import testtools - -from magnum.tests.functional.api import base -from magnum.tests.functional.common import datagen - - -class BayModelAdminTest(base.BaseTempestTest): - - """Tests for baymodel admin operations.""" - - def __init__(self, *args, **kwargs): - super(BayModelAdminTest, self).__init__(*args, **kwargs) - self.baymodels = [] - self.baymodel_client = None - self.keypairs_client = None - - def setUp(self): - try: - super(BayModelAdminTest, self).setUp() - (self.baymodel_client, - self.keypairs_client) = self.get_clients_with_new_creds( - type_of_creds='admin', - request_type='baymodel') - except Exception: - self.tearDown() - raise - - def tearDown(self): - for baymodel_id in self.baymodels: - self._delete_baymodel(baymodel_id) - self.baymodels.remove(baymodel_id) - super(BayModelAdminTest, self).tearDown() - - def _create_baymodel(self, baymodel_model): - resp, model = self.baymodel_client.post_baymodel(baymodel_model) - self.assertEqual(201, resp.status) - self.baymodels.append(model.uuid) - return resp, model - - def _delete_baymodel(self, baymodel_id): - resp, model = self.baymodel_client.delete_baymodel(baymodel_id) - self.assertEqual(204, resp.status) - return resp, model - - @testtools.testcase.attr('positive') - def test_create_get_public_baymodel(self): - gen_model = datagen.valid_swarm_baymodel(is_public=True) - resp, model = self._create_baymodel(gen_model) - - resp, model = self.baymodel_client.get_baymodel(model.uuid) - self.assertEqual(200, resp.status) - self.assertTrue(model.public) - - @testtools.testcase.attr('positive') - def test_update_baymodel_public_by_uuid(self): - path = "/public" - gen_model = datagen.baymodel_data_with_valid_keypair_image_flavor() - resp, old_model = self._create_baymodel(gen_model) - - patch_model = datagen.baymodel_replace_patch_data(path, value=True) - resp, new_model = self.baymodel_client.patch_baymodel( - old_model.uuid, patch_model) - self.assertEqual(200, resp.status) - - resp, model = self.baymodel_client.get_baymodel(new_model.uuid) - self.assertEqual(200, resp.status) - self.assertTrue(model.public) diff --git a/magnum/tests/functional/api/v1/test_cluster.py b/magnum/tests/functional/api/v1/test_cluster.py deleted file mode 100755 index fc2a1481..00000000 --- a/magnum/tests/functional/api/v1/test_cluster.py +++ /dev/null @@ -1,262 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures - -from oslo_log import log as logging -from oslo_utils import uuidutils -from tempest.lib.common.utils import data_utils -from tempest.lib import exceptions -import testtools - -from magnum.tests.functional.api import base -from magnum.tests.functional.common import config -from magnum.tests.functional.common import datagen - - -HEADERS = {'OpenStack-API-Version': 'container-infra latest', - 'Accept': 'application/json', - 'Content-Type': 'application/json'} - - -class ClusterTest(base.BaseTempestTest): - - """Tests for cluster CRUD.""" - - LOG = logging.getLogger(__name__) - - def __init__(self, *args, **kwargs): - super(ClusterTest, self).__init__(*args, **kwargs) - self.clusters = [] - self.creds = None - self.keypair = None - self.cluster_template = None - self.cluster_template_client = None - self.keypairs_client = None - self.cluster_client = None - self.cert_client = None - - def setUp(self): - try: - super(ClusterTest, self).setUp() - (self.creds, self.keypair) = self.get_credentials_with_keypair( - type_of_creds='default') - (self.cluster_template_client, - self.keypairs_client) = self.get_clients_with_existing_creds( - creds=self.creds, - type_of_creds='default', - request_type='cluster_template') - (self.cluster_client, _) = self.get_clients_with_existing_creds( - creds=self.creds, - type_of_creds='default', - request_type='cluster') - (self.cert_client, _) = self.get_clients_with_existing_creds( - creds=self.creds, - type_of_creds='default', - request_type='cert') - model = datagen.valid_swarm_cluster_template() - _, self.cluster_template = self._create_cluster_template(model) - - # NOTE (dimtruck) by default tempest sets timeout to 20 mins. - # We need more time. 
- test_timeout = 1800 - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - except Exception: - self.tearDown() - raise - - def tearDown(self): - try: - cluster_list = self.clusters[:] - for cluster_id in cluster_list: - self._delete_cluster(cluster_id) - self.clusters.remove(cluster_id) - if self.cluster_template: - self._delete_cluster_template(self.cluster_template.uuid) - finally: - super(ClusterTest, self).tearDown() - - def _create_cluster_template(self, cm_model): - self.LOG.debug('We will create a clustertemplate for %s', cm_model) - resp, model = self.cluster_template_client.post_cluster_template( - cm_model) - return resp, model - - def _delete_cluster_template(self, cm_id): - self.LOG.debug('We will delete a clustertemplate for %s', cm_id) - resp, model = self.cluster_template_client.delete_cluster_template( - cm_id) - return resp, model - - def _create_cluster(self, cluster_model): - self.LOG.debug('We will create cluster for %s', cluster_model) - resp, model = self.cluster_client.post_cluster(cluster_model) - self.LOG.debug('Response: %s', resp) - self.assertEqual(202, resp.status) - self.assertIsNotNone(model.uuid) - self.assertTrue(uuidutils.is_uuid_like(model.uuid)) - self.clusters.append(model.uuid) - self.cluster_uuid = model.uuid - if config.Config.copy_logs: - self.addCleanup(self.copy_logs_handler( - lambda: list( - [self._get_cluster_by_id(model.uuid)[1].master_addresses, - self._get_cluster_by_id(model.uuid)[1].node_addresses]), - self.cluster_template.coe, - self.keypair)) - self.cluster_client.wait_for_created_cluster(model.uuid, - delete_on_error=False) - return resp, model - - def _delete_cluster(self, cluster_id): - self.LOG.debug('We will delete a cluster for %s', cluster_id) - resp, model = self.cluster_client.delete_cluster(cluster_id) - self.assertEqual(204, resp.status) - self.cluster_client.wait_for_cluster_to_delete(cluster_id) - self.assertRaises(exceptions.NotFound, self.cert_client.get_cert, - cluster_id, headers=HEADERS) - return resp, model - - def _get_cluster_by_id(self, cluster_id): - resp, model = self.cluster_client.get_cluster(cluster_id) - return resp, model - - # (dimtruck) Combining all these tests in one because - # they time out on the gate (2 hours not enough) - @testtools.testcase.attr('positive') - def test_create_list_sign_delete_clusters(self): - gen_model = datagen.valid_cluster_data( - cluster_template_id=self.cluster_template.uuid, node_count=1) - - # test cluster create - _, cluster_model = self._create_cluster(gen_model) - self.assertNotIn('status', cluster_model) - - # test cluster list - resp, cluster_list_model = self.cluster_client.list_clusters() - self.assertEqual(200, resp.status) - self.assertGreater(len(cluster_list_model.clusters), 0) - self.assertIn( - cluster_model.uuid, list([x['uuid'] - for x in cluster_list_model.clusters])) - - # test invalid cluster update - patch_model = datagen.cluster_name_patch_data() - self.assertRaises( - exceptions.BadRequest, - self.cluster_client.patch_cluster, - cluster_model.uuid, patch_model) - - # test ca show - resp, cert_model = self.cert_client.get_cert( - cluster_model.uuid, headers=HEADERS) - self.LOG.debug("cert resp: %s", resp) - self.assertEqual(200, resp.status) - self.assertEqual(cert_model.cluster_uuid, cluster_model.uuid) - self.assertIsNotNone(cert_model.pem) - self.assertIn('-----BEGIN CERTIFICATE-----', cert_model.pem) - self.assertIn('-----END CERTIFICATE-----', cert_model.pem) - - # test ca sign - - csr_sample = """-----BEGIN CERTIFICATE REQUEST----- 
-MIIByjCCATMCAQAwgYkxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlh -MRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgSW5jMR8w -HQYDVQQLExZJbmZvcm1hdGlvbiBUZWNobm9sb2d5MRcwFQYDVQQDEw53d3cuZ29v -Z2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEApZtYJCHJ4VpVXHfV -IlstQTlO4qC03hjX+ZkPyvdYd1Q4+qbAeTwXmCUKYHThVRd5aXSqlPzyIBwieMZr -WFlRQddZ1IzXAlVRDWwAo60KecqeAXnnUK+5fXoTI/UgWshre8tJ+x/TMHaQKR/J -cIWPhqaQhsJuzZbvAdGA80BLxdMCAwEAAaAAMA0GCSqGSIb3DQEBBQUAA4GBAIhl -4PvFq+e7ipARgI5ZM+GZx6mpCz44DTo0JkwfRDf+BtrsaC0q68eTf2XhYOsq4fkH -Q0uA0aVog3f5iJxCa3Hp5gxbJQ6zV6kJ0TEsuaaOhEko9sdpCoPOnRBm2i/XRD2D -6iNh8f8z0ShGsFqjDgFHyF3o+lUyj+UC6H1QW7bn ------END CERTIFICATE REQUEST----- -""" - - cert_data_model = datagen.cert_data(cluster_model.uuid, - csr_data=csr_sample) - resp, cert_model = self.cert_client.post_cert(cert_data_model, - headers=HEADERS) - self.LOG.debug("cert resp: %s", resp) - self.assertEqual(201, resp.status) - self.assertEqual(cert_model.cluster_uuid, cluster_model.uuid) - self.assertIsNotNone(cert_model.pem) - self.assertIn('-----BEGIN CERTIFICATE-----', cert_model.pem) - self.assertIn('-----END CERTIFICATE-----', cert_model.pem) - - # test ca sign invalid - cert_data_model = datagen.cert_data(cluster_model.uuid, - csr_data="invalid_csr") - self.assertRaises( - exceptions.BadRequest, - self.cert_client.post_cert, - cert_data_model, headers=HEADERS) - - # test cluster delete - self._delete_cluster(cluster_model.uuid) - self.clusters.remove(cluster_model.uuid) - - @testtools.testcase.attr('negative') - def test_create_cluster_for_nonexisting_cluster_template(self): - cm_id = 'this-does-not-exist' - gen_model = datagen.valid_cluster_data(cluster_template_id=cm_id) - self.assertRaises( - exceptions.BadRequest, - self.cluster_client.post_cluster, gen_model) - - @testtools.testcase.attr('negative') - def test_create_cluster_with_node_count_0(self): - gen_model = datagen.valid_cluster_data( - cluster_template_id=self.cluster_template.uuid, node_count=0) - self.assertRaises( - exceptions.BadRequest, - self.cluster_client.post_cluster, gen_model) - - @testtools.testcase.attr('negative') - def test_create_cluster_with_zero_masters(self): - uuid = self.cluster_template.uuid - gen_model = datagen.valid_cluster_data(cluster_template_id=uuid, - master_count=0) - self.assertRaises( - exceptions.BadRequest, - self.cluster_client.post_cluster, gen_model) - - @testtools.testcase.attr('negative') - def test_create_cluster_with_nonexisting_flavor(self): - gen_model = \ - datagen.cluster_template_data_with_valid_keypair_image_flavor() - resp, cluster_template = self._create_cluster_template(gen_model) - self.assertEqual(201, resp.status) - self.assertIsNotNone(cluster_template.uuid) - - uuid = cluster_template.uuid - gen_model = datagen.valid_cluster_data(cluster_template_id=uuid) - gen_model.flavor_id = 'aaa' - self.assertRaises(exceptions.BadRequest, - self.cluster_client.post_cluster, gen_model) - - resp, _ = self._delete_cluster_template(cluster_template.uuid) - self.assertEqual(204, resp.status) - - @testtools.testcase.attr('negative') - def test_update_cluster_for_nonexisting_cluster(self): - patch_model = datagen.cluster_name_patch_data() - - self.assertRaises( - exceptions.NotFound, - self.cluster_client.patch_cluster, 'fooo', patch_model) - - @testtools.testcase.attr('negative') - def test_delete_cluster_for_nonexisting_cluster(self): - self.assertRaises( - exceptions.NotFound, - self.cluster_client.delete_cluster, data_utils.rand_uuid()) diff --git 
a/magnum/tests/functional/api/v1/test_cluster_template.py b/magnum/tests/functional/api/v1/test_cluster_template.py deleted file mode 100644 index b0053734..00000000 --- a/magnum/tests/functional/api/v1/test_cluster_template.py +++ /dev/null @@ -1,230 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from tempest.lib.common.utils import data_utils -from tempest.lib import exceptions -import testtools - -from magnum.tests.functional.api import base -from magnum.tests.functional.common import datagen - - -class ClusterTemplateTest(base.BaseTempestTest): - - """Tests for clustertemplate CRUD.""" - - def __init__(self, *args, **kwargs): - super(ClusterTemplateTest, self).__init__(*args, **kwargs) - self.cluster_templates = [] - self.cluster_template_client = None - self.keypairs_client = None - - def setUp(self): - try: - super(ClusterTemplateTest, self).setUp() - (self.cluster_template_client, - self.keypairs_client) = self.get_clients_with_new_creds( - type_of_creds='default', - request_type='cluster_template') - except Exception: - self.tearDown() - raise - - def tearDown(self): - for cluster_template_id in self.cluster_templates: - self._delete_cluster_template(cluster_template_id) - self.cluster_templates.remove(cluster_template_id) - super(ClusterTemplateTest, self).tearDown() - - def _create_cluster_template(self, cmodel_model): - resp, model = \ - self.cluster_template_client.post_cluster_template(cmodel_model) - self.assertEqual(201, resp.status) - self.cluster_templates.append(model.uuid) - return resp, model - - def _delete_cluster_template(self, model_id): - resp, model = \ - self.cluster_template_client.delete_cluster_template(model_id) - self.assertEqual(204, resp.status) - return resp, model - - @testtools.testcase.attr('positive') - def test_list_cluster_templates(self): - gen_model = \ - datagen.cluster_template_data_with_valid_keypair_image_flavor() - _, temp_model = self._create_cluster_template(gen_model) - resp, model = self.cluster_template_client.list_cluster_templates() - self.assertEqual(200, resp.status) - self.assertGreater(len(model.clustertemplates), 0) - self.assertIn( - temp_model.uuid, - list([x['uuid'] for x in model.clustertemplates])) - - @testtools.testcase.attr('positive') - def test_create_cluster_template(self): - gen_model = \ - datagen.cluster_template_data_with_valid_keypair_image_flavor() - resp, model = self._create_cluster_template(gen_model) - - @testtools.testcase.attr('positive') - def test_create_get_public_cluster_template(self): - gen_model = datagen.valid_swarm_cluster_template(is_public=True) - self.assertRaises( - exceptions.Forbidden, - self.cluster_template_client.post_cluster_template, gen_model) - - @testtools.testcase.attr('positive') - def test_update_cluster_template_public_by_uuid(self): - path = "/public" - gen_model = \ - datagen.cluster_template_data_with_valid_keypair_image_flavor() - resp, old_model = self._create_cluster_template(gen_model) - - patch_model = datagen.cluster_template_replace_patch_data(path, - value=True) - 
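        # Publishing a template requires the admin role, so with these default-user credentials the PATCH below should be rejected as Forbidden; the admin variant of this test in test_cluster_template_admin.py succeeds.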
self.assertRaises( - exceptions.Forbidden, - self.cluster_template_client.patch_cluster_template, - old_model.uuid, patch_model) - - @testtools.testcase.attr('positive') - def test_update_cluster_template_by_uuid(self): - gen_model = \ - datagen.cluster_template_data_with_valid_keypair_image_flavor() - resp, old_model = self._create_cluster_template(gen_model) - - patch_model = datagen.cluster_template_name_patch_data() - resp, new_model = self.cluster_template_client.patch_cluster_template( - old_model.uuid, patch_model) - self.assertEqual(200, resp.status) - - resp, model = \ - self.cluster_template_client.get_cluster_template(new_model.uuid) - self.assertEqual(200, resp.status) - self.assertEqual(old_model.uuid, new_model.uuid) - self.assertEqual(model.name, new_model.name) - - @testtools.testcase.attr('positive') - def test_delete_cluster_template_by_uuid(self): - gen_model = \ - datagen.cluster_template_data_with_valid_keypair_image_flavor() - resp, model = self._create_cluster_template(gen_model) - resp, _ = self.cluster_template_client.delete_cluster_template( - model.uuid) - self.assertEqual(204, resp.status) - self.cluster_templates.remove(model.uuid) - - @testtools.testcase.attr('positive') - def test_delete_cluster_template_by_name(self): - gen_model = \ - datagen.cluster_template_data_with_valid_keypair_image_flavor() - resp, model = self._create_cluster_template(gen_model) - resp, _ = self.cluster_template_client.delete_cluster_template( - model.name) - self.assertEqual(204, resp.status) - self.cluster_templates.remove(model.uuid) - - @testtools.testcase.attr('negative') - def test_get_cluster_template_by_uuid_404(self): - self.assertRaises( - exceptions.NotFound, - self.cluster_template_client.get_cluster_template, - data_utils.rand_uuid()) - - @testtools.testcase.attr('negative') - def test_update_cluster_template_404(self): - patch_model = datagen.cluster_template_name_patch_data() - - self.assertRaises( - exceptions.NotFound, - self.cluster_template_client.patch_cluster_template, - data_utils.rand_uuid(), patch_model) - - @testtools.testcase.attr('negative') - def test_delete_cluster_template_404(self): - self.assertRaises( - exceptions.NotFound, - self.cluster_template_client.delete_cluster_template, - data_utils.rand_uuid()) - - @testtools.testcase.attr('negative') - def test_get_cluster_template_by_name_404(self): - self.assertRaises( - exceptions.NotFound, - self.cluster_template_client.get_cluster_template, 'fooo') - - @testtools.testcase.attr('negative') - def test_update_cluster_template_name_not_found(self): - patch_model = datagen.cluster_template_name_patch_data() - - self.assertRaises( - exceptions.NotFound, - self.cluster_template_client.patch_cluster_template, - 'fooo', patch_model) - - @testtools.testcase.attr('negative') - def test_delete_cluster_template_by_name_404(self): - self.assertRaises( - exceptions.NotFound, - self.cluster_template_client.get_cluster_template, 'fooo') - - @testtools.testcase.attr('negative') - def test_create_cluster_template_missing_image(self): - gen_model = datagen.cluster_template_data_with_missing_image() - self.assertRaises( - exceptions.BadRequest, - self.cluster_template_client.post_cluster_template, gen_model) - - @testtools.testcase.attr('negative') - def test_create_cluster_template_missing_flavor(self): - gen_model = datagen.cluster_template_data_with_missing_flavor() - self.assertRaises( - exceptions.BadRequest, - self.cluster_template_client.post_cluster_template, gen_model) - - @testtools.testcase.attr('positive') - 
def test_create_cluster_template_missing_keypair(self): - gen_model = \ - datagen.cluster_template_data_with_missing_keypair() - resp, model = self._create_cluster_template(gen_model) - - @testtools.testcase.attr('negative') - def test_update_cluster_template_invalid_patch(self): - # get json object - gen_model = \ - datagen.cluster_template_data_with_valid_keypair_image_flavor() - resp, old_model = self._create_cluster_template(gen_model) - - self.assertRaises( - exceptions.BadRequest, - self.cluster_template_client.patch_cluster_template, - data_utils.rand_uuid(), gen_model) - - @testtools.testcase.attr('negative') - def test_create_cluster_template_invalid_network_driver(self): - gen_model = \ - datagen.cluster_template_data_with_valid_keypair_image_flavor() - gen_model.network_driver = 'invalid_network_driver' - self.assertRaises( - exceptions.BadRequest, - self.cluster_template_client.post_cluster_template, gen_model) - - @testtools.testcase.attr('negative') - def test_create_cluster_template_invalid_volume_driver(self): - gen_model = \ - datagen.cluster_template_data_with_valid_keypair_image_flavor() - gen_model.volume_driver = 'invalid_volume_driver' - self.assertRaises( - exceptions.BadRequest, - self.cluster_template_client.post_cluster_template, gen_model) diff --git a/magnum/tests/functional/api/v1/test_cluster_template_admin.py b/magnum/tests/functional/api/v1/test_cluster_template_admin.py deleted file mode 100644 index 22f4ee9e..00000000 --- a/magnum/tests/functional/api/v1/test_cluster_template_admin.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
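The admin test class below shares the create/track/cleanup discipline used throughout these functional tests: every successful POST records the new uuid so that tearDown can delete whatever an individual test did not remove itself. A condensed sketch of the pattern (class and method names here are illustrative, not part of the deleted files):

    class ResourceTrackingPattern(object):
        """Illustrates the bookkeeping shared by the test classes."""

        def __init__(self):
            self.cluster_templates = []  # uuids created during a test run

        def create(self, client, model):
            resp, created = client.post_cluster_template(model)
            assert resp.status == 201
            self.cluster_templates.append(created.uuid)  # track for cleanup
            return resp, created

        def cleanup(self, client):
            # Iterate over a copy so removing entries is safe mid-loop.
            for uuid in self.cluster_templates[:]:
                client.delete_cluster_template(uuid)
                self.cluster_templates.remove(uuid)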
- - -import testtools - -from magnum.tests.functional.api import base -from magnum.tests.functional.common import datagen - - -class ClusterTemplateAdminTest(base.BaseTempestTest): - - """Tests for clustertemplate admin operations.""" - - def __init__(self, *args, **kwargs): - super(ClusterTemplateAdminTest, self).__init__(*args, **kwargs) - self.cluster_templates = [] - self.cluster_template_client = None - self.keypairs_client = None - - def setUp(self): - try: - super(ClusterTemplateAdminTest, self).setUp() - (self.cluster_template_client, - self.keypairs_client) = self.get_clients_with_new_creds( - type_of_creds='admin', - request_type='cluster_template') - except Exception: - self.tearDown() - raise - - def tearDown(self): - for cluster_template_id in self.cluster_templates: - self._delete_cluster_template(cluster_template_id) - self.cluster_templates.remove(cluster_template_id) - super(ClusterTemplateAdminTest, self).tearDown() - - def _create_cluster_template(self, cmodel_model): - resp, model = \ - self.cluster_template_client.post_cluster_template(cmodel_model) - self.assertEqual(201, resp.status) - self.cluster_templates.append(model.uuid) - return resp, model - - def _delete_cluster_template(self, model_id): - resp, model = \ - self.cluster_template_client.delete_cluster_template(model_id) - self.assertEqual(204, resp.status) - return resp, model - - @testtools.testcase.attr('positive') - def test_create_get_public_cluster_template(self): - gen_model = datagen.valid_swarm_cluster_template(is_public=True) - resp, model = self._create_cluster_template(gen_model) - - resp, model = \ - self.cluster_template_client.get_cluster_template(model.uuid) - self.assertEqual(200, resp.status) - self.assertTrue(model.public) - - @testtools.testcase.attr('positive') - def test_update_cluster_template_public_by_uuid(self): - path = "/public" - gen_model = \ - datagen.cluster_template_data_with_valid_keypair_image_flavor() - resp, old_model = self._create_cluster_template(gen_model) - - patch_model = datagen.cluster_template_replace_patch_data(path, - value=True) - resp, new_model = self.cluster_template_client.patch_cluster_template( - old_model.uuid, patch_model) - self.assertEqual(200, resp.status) - - resp, model = self.cluster_template_client.get_cluster_template( - new_model.uuid) - self.assertEqual(200, resp.status) - self.assertTrue(model.public) diff --git a/magnum/tests/functional/api/v1/test_magnum_service.py b/magnum/tests/functional/api/v1/test_magnum_service.py deleted file mode 100644 index a85c30b4..00000000 --- a/magnum/tests/functional/api/v1/test_magnum_service.py +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
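The positive test below pins down the payload magnum returns from the service-list call when only the conductor is running. For reference, the body behind msvcs looks roughly like this (id, state, and binary are asserted exactly; report_count only has to be greater than zero; other values are illustrative):

    expected = {
        "mservices": [
            {
                "id": 1,
                "state": "up",
                "binary": "magnum-conductor",
                "report_count": 42,
            }
        ]
    }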
- - -from tempest.lib import exceptions -import testtools - -from magnum.tests.functional.api import base - - -class MagnumServiceTest(base.BaseTempestTest): - - """Tests for magnum-service.""" - - def __init__(self, *args, **kwargs): - super(MagnumServiceTest, self).__init__(*args, **kwargs) - self.service_client = None - - @testtools.testcase.attr('negative') - def test_magnum_service_list_needs_admin(self): - # Ensure that policy enforcement does not allow the 'default' user - (self.service_client, _) = self.get_clients_with_new_creds( - type_of_creds='default', - request_type='service') - self.assertRaises(exceptions.Forbidden, - self.service_client.magnum_service_list) - - @testtools.testcase.attr('positive') - def test_magnum_service_list(self): - # get json object - (self.service_client, _) = self.get_clients_with_new_creds( - type_of_creds='admin', - request_type='service', - class_cleanup=False) - resp, msvcs = self.service_client.magnum_service_list() - self.assertEqual(200, resp.status) - # Note(suro-patz): The following code assumes that only one - # service, magnum-conductor, is enabled as of now. - self.assertEqual(1, len(msvcs.mservices)) - mcond_svc = msvcs.mservices[0] - self.assertEqual(1, mcond_svc['id']) - self.assertEqual('up', mcond_svc['state']) - self.assertEqual('magnum-conductor', mcond_svc['binary']) - self.assertGreater(mcond_svc['report_count'], 0) diff --git a/magnum/tests/functional/common/__init__.py b/magnum/tests/functional/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/functional/common/base.py b/magnum/tests/functional/common/base.py deleted file mode 100755 index f568cfb0..00000000 --- a/magnum/tests/functional/common/base.py +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import os -import subprocess - -from tempest.lib import base - -import magnum - - -COPY_LOG_HELPER = "magnum/tests/contrib/copy_instance_logs.sh" - - -class BaseMagnumTest(base.BaseTestCase): - """Sets up configuration required for functional tests""" - - LOG = logging.getLogger(__name__) - - def __init__(self, *args, **kwargs): - super(BaseMagnumTest, self).__init__(*args, **kwargs) - - @classmethod - def copy_logs_handler(cls, get_nodes_fn, coe, keypair): - """Copy logs closure. - - This method retrieves the addresses of all running nodes for a - specified cluster and copies the logs from those nodes locally. - - :param get_nodes_fn: function that takes no parameters and returns - a list of node IPs in the form: - [[master_nodes], [slave_nodes]].
:param coe: the COE type of the nodes - :param keypair: ssh keypair used to log in to the nodes - """ - def int_copy_logs(): - try: - cls.LOG.info("Copying logs...") - func_name = "test" - msg = "Failed to copy logs for cluster" - nodes_addresses = get_nodes_fn() - - master_nodes = nodes_addresses[0] - slave_nodes = nodes_addresses[1] - - base_path = os.path.split(os.path.dirname( - os.path.abspath(magnum.__file__)))[0] - full_location = os.path.join(base_path, COPY_LOG_HELPER) - - def do_copy_logs(prefix, nodes_address): - if not nodes_address: - return - - msg = "copy logs from : %s" % ','.join(nodes_address) - cls.LOG.info(msg) - log_name = prefix + "-" + func_name - for node_address in nodes_address: - try: - cls.LOG.debug("running %s", full_location) - cls.LOG.debug("keypair: %s", keypair) - subprocess.check_call([ - full_location, - node_address, - coe, - log_name, - str(keypair) - ]) - except Exception: - msg = ( - "failed to copy from %(node_address)s " - "to %(base_path)s%(log_name)s-" - "%(node_address)s" % - {'node_address': node_address, - 'base_path': "/opt/stack/logs/cluster-nodes/", - 'log_name': log_name}) - cls.LOG.exception(msg) - - do_copy_logs('master', master_nodes) - do_copy_logs('node', slave_nodes) - except Exception: - cls.LOG.exception(msg) - - return int_copy_logs diff --git a/magnum/tests/functional/common/client.py b/magnum/tests/functional/common/client.py deleted file mode 100644 index d61c8da8..00000000 --- a/magnum/tests/functional/common/client.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import six -from six.moves.urllib import parse -from tempest.lib.common import rest_client - -from magnum.tests.functional.common import config - - -@six.add_metaclass(abc.ABCMeta) -class MagnumClient(rest_client.RestClient): - """Abstract class responsible for setting up auth provider""" - - def __init__(self, auth_provider): - super(MagnumClient, self).__init__( - auth_provider=auth_provider, - service='container-infra', - region=config.Config.region, - disable_ssl_certificate_validation=True - ) - - @classmethod - def deserialize(cls, resp, body, model_type): - return resp, model_type.from_json(body) - - @property - def tenant_id(self): - return self.client.tenant_id - - @classmethod - def add_filters(cls, url, filters): - """add_filters adds dict values (filters) to url as query parameters - - :param url: base URL for the request - :param filters: dict with var:val pairs to add as parameters to URL - :returns: url string - """ - return url + "?" + parse.urlencode(filters) diff --git a/magnum/tests/functional/common/config.py b/magnum/tests/functional/common/config.py deleted file mode 100644 index 646f5369..00000000 --- a/magnum/tests/functional/common/config.py +++ /dev/null @@ -1,157 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import warnings - -from tempest import config - -from oslo_config import cfg - -CONF = config.CONF - - -class Config(object): - - """Parses configuration to attributes required for auth and test data""" - - @classmethod - def set_admin_creds(cls, config): - cls.admin_user = CONF.auth.admin_username - cls.admin_passwd = CONF.auth.admin_password - # NOTE(toabctl): also allow the old style tempest definition - try: - cls.admin_tenant = CONF.auth.admin_project_name - except cfg.NoSuchOptError: - cls.admin_tenant = CONF.auth.admin_tenant_name - warnings.warn("the config option 'admin_tenant_name' from the " - "'auth' section is deprecated. Please switch " - "to 'admin_project_name'.") - - @classmethod - def set_user_creds(cls, config): - # normal user creds - # Fixme(eliqiao): this is quick workaround to passing tempest - # legacy credentials provider is removed by tempest - # I8c24cd17f643083dde71ab2bd2a38417c54aeccb. - # TODO(eliqiao): find a way to using an accounts.yaml file - # check Ia5132c5cb32355d6f26b8acdd92a0e55a2c19f41 - cls.user = CONF.auth.admin_username - cls.passwd = CONF.auth.admin_password - # NOTE(toabctl): also allow the old style tempest definition - try: - cls.tenant = CONF.auth.admin_project_name - except cfg.NoSuchOptError: - cls.tenant = CONF.auth.admin_tenant_name - warnings.warn("the config option 'admin_tenant_name' from the " - "'auth' section is deprecated. 
Please switch " - "to 'admin_project_name'.") - - @classmethod - def set_auth_version(cls, config): - # auth version for client authentication - cls.auth_version = CONF.identity.auth_version - - @classmethod - def set_auth_url(cls, config): - # auth_url for client authentication - if cls.auth_version == 'v3': - cls.auth_v3_url = CONF.identity.uri_v3 - else: - if 'uri' not in CONF.identity: - raise Exception('config missing auth_url key') - cls.auth_url = CONF.identity.uri - - @classmethod - def set_admin_role(cls, config): - # admin_role for client authentication - if cls.auth_version == 'v3': - cls.admin_role = CONF.identity.admin_role - else: - cls.admin_role = 'admin' - - @classmethod - def set_region(cls, config): - if 'region' in CONF.identity: - cls.region = CONF.identity.region - else: - cls.region = 'RegionOne' - - @classmethod - def set_image_id(cls, config): - if 'image_id' not in CONF.magnum: - raise Exception('config missing image_id key') - cls.image_id = CONF.magnum.image_id - - @classmethod - def set_nic_id(cls, config): - if 'nic_id' not in CONF.magnum: - raise Exception('config missing nic_id key') - cls.nic_id = CONF.magnum.nic_id - - @classmethod - def set_keypair_id(cls, config): - if 'keypair_id' not in CONF.magnum: - raise Exception('config missing keypair_id key') - cls.keypair_id = CONF.magnum.keypair_id - - @classmethod - def set_flavor_id(cls, config): - if 'flavor_id' not in CONF.magnum: - raise Exception('config missing flavor_id key') - cls.flavor_id = CONF.magnum.flavor_id - - @classmethod - def set_magnum_url(cls, config): - cls.magnum_url = CONF.magnum.get('magnum_url', None) - - @classmethod - def set_master_flavor_id(cls, config): - if 'master_flavor_id' not in CONF.magnum: - raise Exception('config missing master_flavor_id key') - cls.master_flavor_id = CONF.magnum.master_flavor_id - - @classmethod - def set_csr_location(cls, config): - if 'csr_location' not in CONF.magnum: - raise Exception('config missing csr_location key') - cls.csr_location = CONF.magnum.csr_location - - @classmethod - def set_dns_nameserver(cls, config): - if 'dns_nameserver' not in CONF.magnum: - raise Exception('config missing dns_nameserver') - cls.dns_nameserver = CONF.magnum.dns_nameserver - - @classmethod - def set_copy_logs(cls, config): - if 'copy_logs' not in CONF.magnum: - cls.copy_logs = True - cls.copy_logs = str(CONF.magnum.copy_logs).lower() == 'true' - - @classmethod - def setUp(cls): - cls.set_admin_creds(config) - cls.set_user_creds(config) - cls.set_auth_version(config) - cls.set_auth_url(config) - cls.set_admin_role(config) - - cls.set_region(config) - cls.set_image_id(config) - cls.set_nic_id(config) - cls.set_keypair_id(config) - cls.set_flavor_id(config) - cls.set_magnum_url(config) - cls.set_master_flavor_id(config) - cls.set_csr_location(config) - cls.set_dns_nameserver(config) - cls.set_copy_logs(config) diff --git a/magnum/tests/functional/common/datagen.py b/magnum/tests/functional/common/datagen.py deleted file mode 100644 index 2fa3fc4a..00000000 --- a/magnum/tests/functional/common/datagen.py +++ /dev/null @@ -1,600 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random -import socket -import string -import struct - -from tempest.lib.common.utils import data_utils - -from magnum.tests.functional.api.v1.models import bay_model -from magnum.tests.functional.api.v1.models import baymodel_model -from magnum.tests.functional.api.v1.models import baymodelpatch_model -from magnum.tests.functional.api.v1.models import baypatch_model -from magnum.tests.functional.api.v1.models import cert_model -from magnum.tests.functional.api.v1.models import cluster_model -from magnum.tests.functional.api.v1.models import cluster_template_model -from magnum.tests.functional.api.v1.models import cluster_templatepatch_model -from magnum.tests.functional.api.v1.models import clusterpatch_model -from magnum.tests.functional.common import config - - -def random_int(min_int=1, max_int=100): - return random.randrange(min_int, max_int) - - -def gen_coe_dep_network_driver(coe): - allowed_driver_types = { - 'kubernetes': ['flannel', None], - 'swarm': ['docker', 'flannel', None], - 'mesos': ['docker', None], - } - driver_types = allowed_driver_types[coe] - return driver_types[random.randrange(0, len(driver_types))] - - -def gen_coe_dep_volume_driver(coe): - allowed_driver_types = { - 'kubernetes': ['cinder', None], - 'swarm': ['rexray', None], - 'mesos': ['rexray', None], - } - driver_types = allowed_driver_types[coe] - return driver_types[random.randrange(0, len(driver_types))] - - -def gen_random_port(): - return random_int(49152, 65535) - - -def gen_docker_volume_size(min_int=3, max_int=5): - return random_int(min_int, max_int) - - -def gen_fake_ssh_pubkey(): - chars = "".join( - random.choice(string.ascii_uppercase + - string.ascii_letters + string.digits + '/+=') - for _ in range(372)) - return "ssh-rsa " + chars - - -def gen_random_ip(): - return socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff))) - - -def gen_url(scheme="http", domain="example.com", port=80): - return "%s://%s:%s" % (scheme, domain, port) - - -def gen_http_proxy(): - return gen_url(port=gen_random_port()) - - -def gen_https_proxy(): - return gen_url(scheme="https", port=gen_random_port()) - - -def gen_no_proxy(): - return ",".join(gen_random_ip() for x in range(3)) - - -def baymodel_data(**kwargs): - """Generates random baymodel data - - Keypair and image id cannot be random for the baymodel to be valid due to - validations for the presence of keypair and image id prior to baymodel - creation. 
- - :param keypair_id: keypair name - :param image_id: image id or name - :returns: BayModelEntity with generated data - """ - - data = { - "name": data_utils.rand_name('bay'), - "coe": "swarm", - "tls_disabled": False, - "network_driver": None, - "volume_driver": None, - "labels": {}, - "public": False, - "dns_nameserver": "8.8.8.8", - "flavor_id": data_utils.rand_name('bay'), - "master_flavor_id": data_utils.rand_name('bay'), - "external_network_id": config.Config.nic_id, - "keypair_id": data_utils.rand_name('bay'), - "image_id": data_utils.rand_name('bay') - } - - data.update(kwargs) - model = baymodel_model.BayModelEntity.from_dict(data) - - return model - - -def baymodel_replace_patch_data(path, value=data_utils.rand_name('bay')): - """Generates random baymodel patch data - - :param path: path to replace - :param value: value to replace in patch - :returns: BayModelPatchCollection with generated data - """ - - data = [{ - "path": path, - "value": value, - "op": "replace" - }] - return baymodelpatch_model.BayModelPatchCollection.from_dict(data) - - -def baymodel_remove_patch_data(path): - """Generates baymodel patch data by removing value - - :param path: path to remove - :returns: BayModelPatchCollection with generated data - """ - - data = [{ - "path": path, - "op": "remove" - }] - return baymodelpatch_model.BayModelPatchCollection.from_dict(data) - - -def baymodel_data_with_valid_keypair_image_flavor(): - """Generates random baymodel data with valid keypair,image and flavor - - :returns: BayModelEntity with generated data - """ - - return baymodel_data(keypair_id=config.Config.keypair_id, - image_id=config.Config.image_id, - flavor_id=config.Config.flavor_id, - master_flavor_id=config.Config.master_flavor_id) - - -def baymodel_data_with_missing_image(): - """Generates random baymodel data with missing image - - :returns: BayModelEntity with generated data - """ - - return baymodel_data(keypair_id=config.Config.keypair_id, - flavor_id=config.Config.flavor_id, - master_flavor_id=config.Config.master_flavor_id) - - -def baymodel_data_with_missing_flavor(): - """Generates random baymodel data with missing flavor - - :returns: BayModelEntity with generated data - """ - - return baymodel_data(keypair_id=config.Config.keypair_id, - image_id=config.Config.image_id) - - -def baymodel_data_with_missing_keypair(): - """Generates random baymodel data with missing keypair - - :returns: BayModelEntity with generated data - """ - - return baymodel_data(image_id=config.Config.image_id, - flavor_id=config.Config.flavor_id, - master_flavor_id=config.Config.master_flavor_id) - - -def baymodel_valid_data_with_specific_coe(coe): - """Generates random baymodel data with valid keypair and image - - :param coe: coe - :returns: BayModelEntity with generated data - """ - - return baymodel_data(keypair_id=config.Config.keypair_id, - image_id=config.Config.image_id, coe=coe) - - -def valid_swarm_baymodel(is_public=False): - """Generates a valid swarm baymodel with valid data - - :returns: BayModelEntity with generated data - """ - - return baymodel_data(image_id=config.Config.image_id, - flavor_id=config.Config.flavor_id, public=is_public, - dns_nameserver=config.Config.dns_nameserver, - master_flavor_id=config.Config.master_flavor_id, - keypair_id=config.Config.keypair_id, coe="swarm", - cluster_distro=None, - external_network_id=config.Config.nic_id, - http_proxy=None, https_proxy=None, no_proxy=None, - network_driver=None, volume_driver=None, labels={}, - tls_disabled=False) - - -def 
bay_data(name=data_utils.rand_name('bay'), - baymodel_id=data_utils.rand_uuid(), - node_count=random_int(1, 5), discovery_url=gen_random_ip(), - bay_create_timeout=random_int(1, 30), - master_count=random_int(1, 5)): - """Generates random bay data - - baymodel_id cannot be random for the bay to be valid, due to - validations for the presence of the baymodel prior to bay - creation. - - :param name: bay name (must be unique) - :param baymodel_id: baymodel unique id (must already exist) - :param node_count: number of agents for bay - :param discovery_url: url provided for node discovery - :param bay_create_timeout: timeout in minutes for bay create - :param master_count: number of master nodes for the bay - :returns: BayEntity with generated data - """ - - data = { - "name": name, - "baymodel_id": baymodel_id, - "node_count": node_count, - "discovery_url": None, - "bay_create_timeout": bay_create_timeout, - "master_count": master_count - } - model = bay_model.BayEntity.from_dict(data) - - return model - - -def valid_bay_data(baymodel_id, name=data_utils.rand_name('bay'), node_count=1, - master_count=1, bay_create_timeout=None): - """Generates random bay data with valid attributes - - :param baymodel_id: baymodel unique id that already exists - :param name: bay name (must be unique) - :param node_count: number of agents for bay - :returns: BayEntity with generated data - """ - - return bay_data(baymodel_id=baymodel_id, name=name, - master_count=master_count, node_count=node_count, - bay_create_timeout=bay_create_timeout) - - -def bay_name_patch_data(name=data_utils.rand_name('bay')): - """Generates random bay patch data - - :param name: name to replace in patch - :returns: BayPatchCollection with generated data - """ - - data = [{ - "path": "/name", - "value": name, - "op": "replace" - }] - return baypatch_model.BayPatchCollection.from_dict(data) - - -def bay_api_addy_patch_data(address='0.0.0.0'): - """Generates random bay patch data - - :param address: api address to replace in patch - :returns: BayPatchCollection with generated data - """ - - data = [{ - "path": "/api_address", - "value": address, - "op": "replace" - }] - return baypatch_model.BayPatchCollection.from_dict(data) - - -def bay_node_count_patch_data(node_count=2): - """Generates random bay patch data - - :param node_count: node count to replace in patch - :returns: BayPatchCollection with generated data - """ - - data = [{ - "path": "/node_count", - "value": node_count, - "op": "replace" - }] - return baypatch_model.BayPatchCollection.from_dict(data) - - -def cert_data(cluster_uuid, csr_data): - """Builds certificate data for the given cluster uuid and CSR""" - data = { - "cluster_uuid": cluster_uuid, - "csr": csr_data} - - model = cert_model.CertEntity.from_dict(data) - - return model - - -def cluster_template_data(**kwargs): - """Generates random cluster_template data - - Keypair and image id cannot be random for the cluster_template to be valid - due to validations for the presence of keypair and image id prior to - cluster_template creation.
- - :param keypair_id: keypair name - :param image_id: image id or name - :returns: ClusterTemplateEntity with generated data - """ - - data = { - "name": data_utils.rand_name('cluster'), - "coe": "swarm", - "tls_disabled": False, - "network_driver": None, - "volume_driver": None, - "labels": {}, - "public": False, - "dns_nameserver": "8.8.8.8", - "flavor_id": data_utils.rand_name('cluster'), - "master_flavor_id": data_utils.rand_name('cluster'), - "external_network_id": config.Config.nic_id, - "keypair_id": data_utils.rand_name('cluster'), - "image_id": data_utils.rand_name('cluster') - } - - data.update(kwargs) - model = cluster_template_model.ClusterTemplateEntity.from_dict(data) - - return model - - -def cluster_template_replace_patch_data(path, - value=data_utils.rand_name('cluster')): - """Generates random ClusterTemplate patch data - - :param path: path to replace - :param value: value to replace in patch - :returns: ClusterTemplatePatchCollection with generated data - """ - - data = [{ - "path": path, - "value": value, - "op": "replace" - }] - collection = cluster_templatepatch_model.ClusterTemplatePatchCollection - return collection.from_dict(data) - - -def cluster_template_remove_patch_data(path): - """Generates ClusterTemplate patch data by removing value - - :param path: path to remove - :returns: ClusterTemplatePatchCollection with generated data - """ - - data = [{ - "path": path, - "op": "remove" - }] - collection = cluster_templatepatch_model.ClusterTemplatePatchCollection - return collection.from_dict(data) - - -def cluster_template_name_patch_data(name=data_utils.rand_name('cluster')): - """Generates random cluster_template patch data - - :param name: name to replace in patch - :returns: ClusterTemplatePatchCollection with generated data - """ - - data = [{ - "path": "/name", - "value": name, - "op": "replace" - }] - collection = cluster_templatepatch_model.ClusterTemplatePatchCollection - return collection.from_dict(data) - - -def cluster_template_flavor_patch_data(flavor=data_utils.rand_name('cluster')): - """Generates random cluster_template patch data - - :param flavor: flavor to replace in patch - :returns: ClusterTemplatePatchCollection with generated data - """ - - data = [{ - "path": "/flavor_id", - "value": flavor, - "op": "replace" - }] - collection = cluster_templatepatch_model.ClusterTemplatePatchCollection - return collection.from_dict(data) - - -def cluster_template_data_with_valid_keypair_image_flavor(): - """Generates random clustertemplate data with valid data - - :returns: ClusterTemplateEntity with generated data - """ - master_flavor = config.Config.master_flavor_id - return cluster_template_data(keypair_id=config.Config.keypair_id, - image_id=config.Config.image_id, - flavor_id=config.Config.flavor_id, - master_flavor_id=master_flavor) - - -def cluster_template_data_with_missing_image(): - """Generates random cluster_template data with missing image - - :returns: ClusterTemplateEntity with generated data - """ - - return cluster_template_data( - keypair_id=config.Config.keypair_id, - flavor_id=config.Config.flavor_id, - master_flavor_id=config.Config.master_flavor_id) - - -def cluster_template_data_with_missing_flavor(): - """Generates random cluster_template data with missing flavor - - :returns: ClusterTemplateEntity with generated data - """ - - return cluster_template_data(keypair_id=config.Config.keypair_id, - image_id=config.Config.image_id) - - -def cluster_template_data_with_missing_keypair(): - """Generates random cluster_template data with 
missing keypair - - :returns: ClusterTemplateEntity with generated data - """ - - return cluster_template_data( - image_id=config.Config.image_id, - flavor_id=config.Config.flavor_id, - master_flavor_id=config.Config.master_flavor_id) - - -def cluster_template_valid_data_with_specific_coe(coe): - """Generates random cluster_template data with valid keypair and image - - :param coe: container orchestration engine to use - :returns: ClusterTemplateEntity with generated data - """ - - return cluster_template_data(keypair_id=config.Config.keypair_id, - image_id=config.Config.image_id, coe=coe) - - -def valid_swarm_cluster_template(is_public=False): - """Generates a valid swarm cluster_template with valid data - - :param is_public: whether the cluster_template is public - :returns: ClusterTemplateEntity with generated data - """ - master_flavor_id = config.Config.master_flavor_id - return cluster_template_data(image_id=config.Config.image_id, - flavor_id=config.Config.flavor_id, - public=is_public, - dns_nameserver=config.Config.dns_nameserver, - master_flavor_id=master_flavor_id, - coe="swarm", - cluster_distro=None, - external_network_id=config.Config.nic_id, - http_proxy=None, https_proxy=None, - no_proxy=None, network_driver=None, - volume_driver=None, labels={}, - tls_disabled=False) - - -def cluster_data(name=data_utils.rand_name('cluster'), - cluster_template_id=data_utils.rand_uuid(), - node_count=random_int(1, 5), discovery_url=gen_random_ip(), - create_timeout=random_int(1, 30), - master_count=random_int(1, 5)): - """Generates random cluster data - - cluster_template_id cannot be random for the cluster to be valid due to - validations for the presence of the cluster_template prior to cluster - creation. - - :param name: cluster name (must be unique) - :param cluster_template_id: cluster_template unique id (must already exist) - :param node_count: number of agents for cluster - :param discovery_url: url provided for node discovery - :param create_timeout: timeout in minutes for cluster create - :param master_count: number of master nodes for the cluster - :returns: ClusterEntity with generated data - """ - - data = { - "name": name, - "cluster_template_id": cluster_template_id, - "keypair": config.Config.keypair_id, - "node_count": node_count, - # discovery_url is deliberately None so that magnum generates one - "discovery_url": None, - "create_timeout": create_timeout, - "master_count": master_count - } - model = cluster_model.ClusterEntity.from_dict(data) - - return model - - -def valid_cluster_data(cluster_template_id, - name=data_utils.rand_name('cluster'), - node_count=1, master_count=1, create_timeout=None): - """Generates random cluster data with a valid cluster_template - - :param cluster_template_id: cluster_template unique id that already exists - :param name: cluster name (must be unique) - :param node_count: number of agents for cluster - :param master_count: number of master nodes for the cluster - :param create_timeout: timeout in minutes for cluster create - :returns: ClusterEntity with generated data - """ - - return cluster_data(cluster_template_id=cluster_template_id, name=name, - master_count=master_count, node_count=node_count, - create_timeout=create_timeout) - - -def cluster_name_patch_data(name=data_utils.rand_name('cluster')): - """Generates cluster patch data to replace the name - - :param name: name to replace in patch - :returns: ClusterPatchCollection with generated data - """ - - data = [{ - "path": "/name", - "value": name, - "op": "replace" - }] - return clusterpatch_model.ClusterPatchCollection.from_dict(data) - - -def cluster_api_addy_patch_data(address='0.0.0.0'): - """Generates cluster patch data to replace the api address - - :param address: api address to replace in patch - :returns: ClusterPatchCollection with generated data - """ - - data = [{ - "path": "/api_address", - "value": 
address, - "op": "replace" - }] - return clusterpatch_model.ClusterPatchCollection.from_dict(data) - - -def cluster_node_count_patch_data(node_count=2): - """Generates cluster patch data to replace the node count - - :param node_count: node count to replace in patch - :returns: ClusterPatchCollection with generated data - """ - - data = [{ - "path": "/node_count", - "value": node_count, - "op": "replace" - }] - return clusterpatch_model.ClusterPatchCollection.from_dict(data) diff --git a/magnum/tests/functional/common/manager.py b/magnum/tests/functional/common/manager.py deleted file mode 100644 index 9ecf0460..00000000 --- a/magnum/tests/functional/common/manager.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest import clients -from tempest.common import credentials_factory as common_creds - -from magnum.tests.functional.api.v1.clients import bay_client -from magnum.tests.functional.api.v1.clients import baymodel_client -from magnum.tests.functional.api.v1.clients import cert_client -from magnum.tests.functional.api.v1.clients import cluster_client -from magnum.tests.functional.api.v1.clients import cluster_template_client -from magnum.tests.functional.api.v1.clients import magnum_service_client -from magnum.tests.functional.common import client -from magnum.tests.functional.common import config - - -class Manager(clients.Manager): - def __init__(self, credentials=None, request_type=None): - if not credentials: - credentials = common_creds.get_configured_credentials( - 'identity_admin') - super(Manager, self).__init__(credentials) - self.auth_provider.orig_base_url = self.auth_provider.base_url - self.auth_provider.base_url = self.bypassed_base_url - auth = self.auth_provider - if request_type == 'baymodel': - self.client = baymodel_client.BayModelClient(auth) - elif request_type == 'bay': - self.client = bay_client.BayClient(auth) - elif request_type == 'cert': - self.client = cert_client.CertClient(auth) - elif request_type == 'cluster_template': - self.client = cluster_template_client.ClusterTemplateClient(auth) - elif request_type == 'cluster': - self.client = cluster_client.ClusterClient(auth) - elif request_type == 'service': - self.client = magnum_service_client.MagnumServiceClient(auth) - else: - self.client = client.MagnumClient(auth) - - def bypassed_base_url(self, filters, auth_data=None): - if (config.Config.magnum_url and - filters['service'] == 'container-infra'): - return config.Config.magnum_url - return self.auth_provider.orig_base_url(filters, auth_data=auth_data) - - -class DefaultManager(Manager): - def __init__(self, credentials, request_type=None): - super(DefaultManager, self).__init__(credentials, request_type) - - -class AltManager(Manager): - def __init__(self, credentials, request_type=None): - super(AltManager, self).__init__(credentials, request_type) - - -class AdminManager(Manager): - def __init__(self, credentials, request_type=None): - super(AdminManager, self).__init__(credentials, request_type) diff --git 
a/magnum/tests/functional/common/models.py b/magnum/tests/functional/common/models.py deleted file mode 100644 index 2cf000f0..00000000 --- a/magnum/tests/functional/common/models.py +++ /dev/null @@ -1,70 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - - -class BaseModel(object): - """Superclass responsible for converting JSON data to/from a model""" - - @classmethod - def from_json(cls, json_str): - return cls.from_dict(json.loads(json_str)) - - def to_json(self): - return json.dumps(self.to_dict()) - - @classmethod - def from_dict(cls, data): - model = cls() - for key in data: - setattr(model, key, data.get(key)) - return model - - def to_dict(self): - result = {} - for key in self.__dict__: - result[key] = getattr(self, key) - if isinstance(result[key], BaseModel): - result[key] = result[key].to_dict() - return result - - def __str__(self): - return "%s" % self.to_dict() - - -class EntityModel(BaseModel): - """Superclass responsible for converting a dict to a model instance""" - - @classmethod - def from_dict(cls, data): - model = super(EntityModel, cls).from_dict(data) - if hasattr(model, cls.ENTITY_NAME): - val = getattr(model, cls.ENTITY_NAME) - setattr(model, cls.ENTITY_NAME, cls.MODEL_TYPE.from_dict(val)) - return model - - -class CollectionModel(BaseModel): - """Superclass responsible for converting a dict to a list of models""" - - @classmethod - def from_dict(cls, data): - model = super(CollectionModel, cls).from_dict(data) - - collection = [] - if hasattr(model, cls.COLLECTION_NAME): - for d in getattr(model, cls.COLLECTION_NAME): - collection.append(cls.MODEL_TYPE.from_dict(d)) - setattr(model, cls.COLLECTION_NAME, collection) - - return model diff --git a/magnum/tests/functional/common/utils.py b/magnum/tests/functional/common/utils.py deleted file mode 100644 index 83cd08e0..00000000 --- a/magnum/tests/functional/common/utils.py +++ /dev/null @@ -1,111 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import functools -import inspect -import time -import types - - -def def_method(f, *args, **kwargs): - @functools.wraps(f) - def new_method(self): - return f(self, *args, **kwargs) - return new_method - - -def parameterized_class(cls): - """A class decorator for running parameterized test cases. - - Mark your class with @parameterized_class. - Mark your test cases with @parameterized.
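To make the contract of these two decorators concrete, a short usage sketch (illustrative only; it assumes the decorators above are in scope and relies on the Python 2 form of types.MethodType that this module uses):

import unittest

@parameterized_class
class TestVal(unittest.TestCase):

    @parameterized({
        'zero': dict(val=0),
        'one': dict(val=1),
    })
    def test_val(self, val):
        # Expands into test_val_zero (val=0) and test_val_one (val=1).
        self.assertIsInstance(val, int)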
- """ - test_functions = inspect.getmembers(cls, predicate=inspect.ismethod) - for (name, f) in test_functions: - if name.startswith('test_') and not hasattr(f, '_test_data'): - continue - - # remove the original test function from the class - delattr(cls, name) - - # add a new test function to the class for each entry in f._test_data - for tag, args in f._test_data.items(): - new_name = "{0}_{1}".format(f.__name__, tag) - if hasattr(cls, new_name): - raise Exception( - "Parameterized test case '{0}.{1}' created from '{0}.{2}' " - "already exists".format(cls.__name__, new_name, name)) - - # Using `def new_method(self): f(self, **args)` is not sufficient - # (all new_methods use the same args value due to late binding). - # Instead, use this factory function. - new_method = def_method(f, **args) - - # To add a method to a class, available for all instances: - # MyClass.method = types.MethodType(f, None, MyClass) - setattr(cls, new_name, types.MethodType(new_method, None, cls)) - return cls - - -def parameterized(data): - """A function decorator for parameterized test cases. - - Example: - - @parameterized({ - 'zero': dict(val=0), - 'one': dict(val=1), - }) - def test_val(self, val): - self.assertEqual(val, self.get_val()) - - The above will generate two test cases: - `test_val_zero` which runs with val=0 - `test_val_one` which runs with val=1 - - :param data: A dictionary that looks like {tag: {arg1: val1, ...}} - """ - def wrapped(f): - f._test_data = data - return f - return wrapped - - -def wait_for_condition(condition, interval=1, timeout=40): - start_time = time.time() - end_time = time.time() + timeout - while time.time() < end_time: - result = condition() - if result: - return result - time.sleep(interval) - raise Exception(("Timed out after %s seconds. Started " + - "on %s and ended on %s") % (timeout, start_time, end_time)) - - -def memoized(func): - """A decorator to cache function's return value""" - cache = {} - - @functools.wraps(func) - def wrapper(*args): - if not isinstance(args, collections.Hashable): - # args is not cacheable. just call the function. - return func(*args) - if args in cache: - return cache[args] - else: - value = func(*args) - cache[args] = value - return value - return wrapper diff --git a/magnum/tests/functional/k8s/__init__.py b/magnum/tests/functional/k8s/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/functional/k8s/test_k8s_python_client.py b/magnum/tests/functional/k8s/test_k8s_python_client.py deleted file mode 100644 index 973f9da4..00000000 --- a/magnum/tests/functional/k8s/test_k8s_python_client.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from magnum.tests.functional import python_client_base as base - - -class TestKubernetesAPIs(base.BaseK8sTest): - cluster_template_kwargs = { - "tls_disabled": False, - "network_driver": 'flannel', - "volume_driver": 'cinder', - "docker_storage_driver": 'overlay', - "labels": { - "system_pods_initial_delay": 3600, - "system_pods_timeout": 600, - "admission_control_list": "", - "kube_dashboard_enabled": False, - } - } diff --git a/magnum/tests/functional/k8s/test_magnum_python_client.py b/magnum/tests/functional/k8s/test_magnum_python_client.py deleted file mode 100644 index 2736219c..00000000 --- a/magnum/tests/functional/k8s/test_magnum_python_client.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from magnum.tests.functional.python_client_base import BaseMagnumClient - - -class TestListResources(BaseMagnumClient): - def test_cluster_model_list(self): - self.assertIsNotNone(self.cs.cluster_templates.list()) - - def test_cluster_list(self): - self.assertIsNotNone(self.cs.clusters.list()) diff --git a/magnum/tests/functional/k8s_coreos/__init__.py b/magnum/tests/functional/k8s_coreos/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/functional/k8s_coreos/test_k8s_python_client.py b/magnum/tests/functional/k8s_coreos/test_k8s_python_client.py deleted file mode 100644 index ad659ed8..00000000 --- a/magnum/tests/functional/k8s_coreos/test_k8s_python_client.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional import python_client_base as base - - -class TestCoreosKubernetesAPIs(base.BaseK8sTest): - cluster_template_kwargs = { - "tls_disabled": True, - "network_driver": 'flannel', - "volume_driver": None, - "labels": { - "system_pods_initial_delay": 3600, - "system_pods_timeout": 600, - "kube_dashboard_enabled": False - } - } diff --git a/magnum/tests/functional/k8s_ironic/__init__.py b/magnum/tests/functional/k8s_ironic/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/functional/k8s_ironic/test_k8s_python_client.py b/magnum/tests/functional/k8s_ironic/test_k8s_python_client.py deleted file mode 100644 index c19515cf..00000000 --- a/magnum/tests/functional/k8s_ironic/test_k8s_python_client.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional import python_client_base as base - - -class TestFedoraKubernetesIronicAPIs(base.BaseK8sTest): - cluster_complete_timeout = 3200 - cluster_template_kwargs = { - "tls_disabled": True, - "network_driver": 'flannel', - "volume_driver": None, - "fixed_subnet": 'private-subnet', - "server_type": 'bm', - "docker_storage_driver": 'overlay', - "labels": { - "system_pods_initial_delay": 3600, - "system_pods_timeout": 600, - "kube_dashboard_enabled": False - } - } diff --git a/magnum/tests/functional/mesos/__init__.py b/magnum/tests/functional/mesos/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/functional/mesos/test_mesos_python_client.py b/magnum/tests/functional/mesos/test_mesos_python_client.py deleted file mode 100644 index bc33d261..00000000 --- a/magnum/tests/functional/mesos/test_mesos_python_client.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.tests.functional.python_client_base import ClusterTest - - -class TestClusterResource(ClusterTest): - coe = 'mesos' - cluster_template_kwargs = { - "tls_disabled": True, - "network_driver": 'docker', - "volume_driver": 'rexray' - } - - def test_cluster_create_and_delete(self): - pass diff --git a/magnum/tests/functional/python_client_base.py b/magnum/tests/functional/python_client_base.py deleted file mode 100755 index aed5a9c1..00000000 --- a/magnum/tests/functional/python_client_base.py +++ /dev/null @@ -1,509 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_magnum ----------------------------------- - -Tests for `magnum` module. 
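The BaseMagnumClient class below collects each credential from the environment first and falls back to functional_creds.conf. That pattern, distilled into a standalone sketch (the cred helper and its signature are illustrative, not part of the original module):

import os
from six.moves import configparser

def cred(env_key, section, option, path='functional_creds.conf'):
    # The environment wins; the config file only fills in missing values.
    value = os.environ.get(env_key)
    parser = configparser.RawConfigParser()
    if not value and parser.read(path):
        value = parser.get(section, option)
    return value

user = cred('OS_USERNAME', 'admin', 'user')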
-""" - -import os -import subprocess -import tempfile -import time - -import fixtures -from six.moves import configparser - -from heatclient import client as heatclient -from keystoneclient.v3 import client as ksclient -from kubernetes import client as k8s_config -from kubernetes.client import api_client -from kubernetes.client.apis import core_v1_api - -from magnum.common.utils import rmtree_without_raise -import magnum.conf -from magnum.tests.functional.common import base -from magnum.tests.functional.common import utils -from magnumclient.common.apiclient import exceptions -from magnumclient.common import cliutils -from magnumclient.v1 import client as v1client - -CONF = magnum.conf.CONF - - -class BaseMagnumClient(base.BaseMagnumTest): - - @classmethod - def setUpClass(cls): - # Collecting of credentials: - # - # Support the existence of a functional_creds.conf for - # testing. This makes it possible to use a config file. - super(BaseMagnumClient, cls).setUpClass() - user = cliutils.env('OS_USERNAME') - passwd = cliutils.env('OS_PASSWORD') - project_name = cliutils.env('OS_PROJECT_NAME') - auth_url = cliutils.env('OS_AUTH_URL') - insecure = cliutils.env('INSECURE') - region_name = cliutils.env('OS_REGION_NAME') - magnum_url = cliutils.env('BYPASS_URL') - image_id = cliutils.env('IMAGE_ID') - nic_id = cliutils.env('NIC_ID') - flavor_id = cliutils.env('FLAVOR_ID') - master_flavor_id = cliutils.env('MASTER_FLAVOR_ID') - keypair_id = cliutils.env('KEYPAIR_ID') - dns_nameserver = cliutils.env('DNS_NAMESERVER') - copy_logs = cliutils.env('COPY_LOGS') - user_domain_id = cliutils.env('OS_USER_DOMAIN_ID') - project_domain_id = cliutils.env('OS_PROJECT_DOMAIN_ID') - - config = configparser.RawConfigParser() - if config.read('functional_creds.conf'): - # the OR pattern means the environment is preferred for - # override - user = user or config.get('admin', 'user') - passwd = passwd or config.get('admin', 'pass') - project_name = project_name or config.get('admin', 'project_name') - auth_url = auth_url or config.get('auth', 'auth_url') - insecure = insecure or config.get('auth', 'insecure') - magnum_url = magnum_url or config.get('auth', 'magnum_url') - image_id = image_id or config.get('magnum', 'image_id') - nic_id = nic_id or config.get('magnum', 'nic_id') - flavor_id = flavor_id or config.get('magnum', 'flavor_id') - master_flavor_id = master_flavor_id or config.get( - 'magnum', 'master_flavor_id') - keypair_id = keypair_id or config.get('magnum', 'keypair_id') - dns_nameserver = dns_nameserver or config.get( - 'magnum', 'dns_nameserver') - user_domain_id = user_domain_id or config.get( - 'admin', 'user_domain_id') - project_domain_id = project_domain_id or config.get( - 'admin', 'project_domain_id') - - try: - copy_logs = copy_logs or config.get('magnum', 'copy_logs') - except configparser.NoOptionError: - pass - - cls.image_id = image_id - cls.nic_id = nic_id - cls.flavor_id = flavor_id - cls.master_flavor_id = master_flavor_id - cls.keypair_id = keypair_id - cls.dns_nameserver = dns_nameserver - cls.copy_logs = str(copy_logs).lower() == 'true' - cls.cs = v1client.Client(username=user, - api_key=passwd, - project_name=project_name, - auth_url=auth_url, - insecure=insecure, - user_domain_id=user_domain_id, - project_domain_id=project_domain_id, - service_type='container-infra', - region_name=region_name, - magnum_url=magnum_url, - api_version='latest') - cls.keystone = ksclient.Client(username=user, - password=passwd, - project_name=project_name, - project_domain_id=project_domain_id, - 
user_domain_id=user_domain_id, - auth_url=auth_url, - insecure=insecure) - token = cls.keystone.auth_token - heat_endpoint = cls.keystone.service_catalog.url_for( - service_type='orchestration') - cls.heat = heatclient.Client('1', token=token, endpoint=heat_endpoint) - - @classmethod - def _wait_on_status(cls, cluster, wait_status, finish_status, - timeout=6000): - # Check status every 60 seconds for a total of 100 minutes - - def _check_status(): - status = cls.cs.clusters.get(cluster.uuid).status - cls.LOG.debug("Cluster status is %s", status) - if status in wait_status: - return False - elif status in finish_status: - return True - else: - raise Exception("Unexpected Status: %s" % status) - - # sleep 1s to wait for cluster status changes; this is useful the - # first time we wait for the status, to avoid waiting another 59s - time.sleep(1) - utils.wait_for_condition(_check_status, interval=60, timeout=timeout) - - @classmethod - def _create_cluster_template(cls, name, **kwargs): - # TODO(eliqiao): We don't want these to have default values, - # just leave them here to make things work. - # Plan is to support other kinds of ClusterTemplate - # creation. - coe = kwargs.pop('coe', 'kubernetes') - network_driver = kwargs.pop('network_driver', 'flannel') - volume_driver = kwargs.pop('volume_driver', 'cinder') - labels = kwargs.pop('labels', {"K1": "V1", "K2": "V2"}) - tls_disabled = kwargs.pop('tls_disabled', False) - fixed_subnet = kwargs.pop('fixed_subnet', None) - server_type = kwargs.pop('server_type', 'vm') - - cluster_template = cls.cs.cluster_templates.create( - name=name, - keypair_id=cls.keypair_id, - external_network_id=cls.nic_id, - image_id=cls.image_id, - flavor_id=cls.flavor_id, - master_flavor_id=cls.master_flavor_id, - network_driver=network_driver, - volume_driver=volume_driver, - dns_nameserver=cls.dns_nameserver, - coe=coe, - labels=labels, - tls_disabled=tls_disabled, - fixed_subnet=fixed_subnet, - server_type=server_type, - **kwargs) - return cluster_template - - @classmethod - def _create_cluster(cls, name, cluster_template_uuid): - cluster = cls.cs.clusters.create( - name=name, - cluster_template_id=cluster_template_uuid - ) - - return cluster - - @classmethod - def _show_cluster(cls, name): - cluster = cls.cs.clusters.get(name) - return cluster - - @classmethod - def _delete_cluster_template(cls, cluster_template_uuid): - cls.cs.cluster_templates.delete(cluster_template_uuid) - - @classmethod - def _delete_cluster(cls, cluster_uuid): - cls.cs.clusters.delete(cluster_uuid) - - try: - cls._wait_on_status( - cls.cluster, - ["CREATE_COMPLETE", "DELETE_IN_PROGRESS", "CREATE_FAILED"], - ["DELETE_FAILED", "DELETE_COMPLETE"], - timeout=600 - ) - except exceptions.NotFound: - pass - else: - if cls._show_cluster(cls.cluster.uuid).status == 'DELETE_FAILED': - raise Exception("Cluster %s delete failed" % cls.cluster.uuid) - - @classmethod - def get_copy_logs(cls): - return cls.copy_logs - - def _wait_for_cluster_complete(self, cluster): - self._wait_on_status( - cluster, - [None, "CREATE_IN_PROGRESS"], - ["CREATE_FAILED", "CREATE_COMPLETE"], - timeout=self.cluster_complete_timeout - ) - - if self.cs.clusters.get(cluster.uuid).status == 'CREATE_FAILED': - raise Exception("Cluster %s create failed" % cluster.uuid) - - return cluster - - -class ClusterTest(BaseMagnumClient): - - # NOTE (eliqiao) coe should be specified in subclasses - coe = None - cluster_template_kwargs = {} - config_contents = """[req] -distinguished_name = req_distinguished_name -req_extensions = req_ext -prompt 
= no -[req_distinguished_name] -CN = Your Name -[req_ext] -extendedKeyUsage = clientAuth -""" - - ca_dir = None - cluster = None - cluster_template = None - key_file = None - cert_file = None - ca_file = None - - cluster_complete_timeout = 1800 - - @classmethod - def setUpClass(cls): - super(ClusterTest, cls).setUpClass() - cls.cluster_template = cls._create_cluster_template( - cls.__name__, coe=cls.coe, **cls.cluster_template_kwargs) - cls.cluster = cls._create_cluster(cls.__name__, - cls.cluster_template.uuid) - if not cls.cluster_template_kwargs.get('tls_disabled', False): - # NOTE (wangbo) with multiple magnum-conductor processes, client - # CA files should be created after completion of the cluster ca_cert - try: - cls._wait_on_status( - cls.cluster, - [None, "CREATE_IN_PROGRESS"], - ["CREATE_FAILED", "CREATE_COMPLETE"], - timeout=cls.cluster_complete_timeout - ) - except Exception: - # copy logs if setUpClass fails; this may not work, - # as master_address and node_address would not be available; - # if not, we can get them from nova - if cls.copy_logs: - cls.copy_logs_handler( - cls._get_nodes, - cls.cluster_template.coe, - 'default') - cls._create_tls_ca_files(cls.config_contents) - - @classmethod - def tearDownClass(cls): - if cls.ca_dir: - rmtree_without_raise(cls.ca_dir) - if cls.cluster: - cls._delete_cluster(cls.cluster.uuid) - if cls.cluster_template: - cls._delete_cluster_template(cls.cluster_template.uuid) - super(ClusterTest, cls).tearDownClass() - - def setUp(self): - super(ClusterTest, self).setUp() - - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 60) - try: - test_timeout = int(test_timeout) - except ValueError: - # If timeout value is invalid, set a default timeout. - test_timeout = CONF.cluster_heat.create_timeout - if test_timeout <= 0: - test_timeout = CONF.cluster_heat.create_timeout - - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - - # Copy cluster nodes logs - if self.copy_logs: - self.addCleanup( - self.copy_logs_handler( - self._get_nodes, - self.cluster_template.coe, - 'default')) - self._wait_for_cluster_complete(self.cluster) - - def _get_nodes(self): - nodes = self._get_nodes_from_cluster() - if not [x for x in nodes if x]: - self.LOG.info("the list of nodes from cluster is empty") - nodes = self._get_nodes_from_stack() - if not [x for x in nodes if x]: - self.LOG.info("the list of nodes from stack is empty") - self.LOG.info("Nodes are: %s", nodes) - return nodes - - def _get_nodes_from_cluster(self): - nodes = [] - nodes.append(self.cs.clusters.get(self.cluster.uuid).master_addresses) - nodes.append(self.cs.clusters.get(self.cluster.uuid).node_addresses) - return nodes - - def _get_nodes_from_stack(self): - nodes = [] - stack = self.heat.stacks.get(self.cluster.stack_id) - stack_outputs = stack.to_dict().get('outputs', []) - output_keys = [] - if self.cluster_template.coe == "kubernetes": - output_keys = ["kube_masters", "kube_minions"] - elif self.cluster_template.coe == "swarm": - output_keys = ["swarm_masters", "swarm_nodes"] - elif self.cluster_template.coe == "mesos": - output_keys = ["mesos_master", "mesos_slaves"] - - for output in stack_outputs: - for key in output_keys: - if output['output_key'] == key: - nodes.append(output['output_value']) - return nodes - - @classmethod - def _create_tls_ca_files(cls, client_conf_contents): - """Creates CA files from client_conf_contents.""" - - cls.ca_dir = tempfile.mkdtemp() - cls.csr_file = '%s/client.csr' % cls.ca_dir - cls.client_config_file = '%s/client.conf' % cls.ca_dir - - 
cls.key_file = '%s/client.key' % cls.ca_dir - cls.cert_file = '%s/client.crt' % cls.ca_dir - cls.ca_file = '%s/ca.crt' % cls.ca_dir - - with open(cls.client_config_file, 'w') as f: - f.write(client_conf_contents) - - def _write_client_key(): - subprocess.call(['openssl', 'genrsa', - '-out', cls.key_file, - '4096']) - - def _create_client_csr(): - subprocess.call(['openssl', 'req', '-new', - '-days', '365', - '-key', cls.key_file, - '-out', cls.csr_file, - '-config', cls.client_config_file]) - - _write_client_key() - _create_client_csr() - - with open(cls.csr_file, 'r') as f: - csr_content = f.read() - - # magnum ca-sign --cluster secure-k8scluster --csr client.csr \ - # > client.crt - resp = cls.cs.certificates.create(cluster_uuid=cls.cluster.uuid, - csr=csr_content) - - with open(cls.cert_file, 'w') as f: - f.write(resp.pem) - - # magnum ca-show --cluster secure-k8scluster > ca.crt - resp = cls.cs.certificates.get(cls.cluster.uuid) - - with open(cls.ca_file, 'w') as f: - f.write(resp.pem) - - -class BaseK8sTest(ClusterTest): - coe = 'kubernetes' - - @classmethod - def setUpClass(cls): - super(BaseK8sTest, cls).setUpClass() - cls.kube_api_url = cls.cs.clusters.get(cls.cluster.uuid).api_address - config = k8s_config.ConfigurationObject() - config.host = cls.kube_api_url - config.ssl_ca_cert = cls.ca_file - config.cert_file = cls.cert_file - config.key_file = cls.key_file - k8s_client = api_client.ApiClient(config=config) - cls.k8s_api = core_v1_api.CoreV1Api(k8s_client) - - def setUp(self): - super(BaseK8sTest, self).setUp() - self.kube_api_url = self.cs.clusters.get(self.cluster.uuid).api_address - config = k8s_config.ConfigurationObject() - config.host = self.kube_api_url - config.ssl_ca_cert = self.ca_file - config.cert_file = self.cert_file - config.key_file = self.key_file - k8s_client = api_client.ApiClient(config=config) - self.k8s_api = core_v1_api.CoreV1Api(k8s_client) - # TODO(coreypobrien) https://bugs.launchpad.net/magnum/+bug/1551824 - utils.wait_for_condition(self._is_api_ready, 5, 600) - - def _is_api_ready(self): - try: - self.k8s_api.list_node() - self.LOG.info("API is ready.") - return True - except Exception: - self.LOG.info("API is not ready yet.") - return False - - def test_pod_apis(self): - pod_manifest = {'apiVersion': 'v1', - 'kind': 'Pod', - 'metadata': {'color': 'blue', 'name': 'test'}, - 'spec': {'containers': [{'image': 'dockerfile/redis', - 'name': 'redis'}]}} - - resp = self.k8s_api.create_namespaced_pod(body=pod_manifest, - namespace='default') - self.assertEqual('test', resp.metadata.name) - self.assertTrue(resp.status.phase) - - resp = self.k8s_api.read_namespaced_pod(name='test', - namespace='default') - self.assertEqual('test', resp.metadata.name) - self.assertTrue(resp.status.phase) - - resp = self.k8s_api.delete_namespaced_pod(name='test', body={}, - namespace='default') - - def test_service_apis(self): - service_manifest = {'apiVersion': 'v1', - 'kind': 'Service', - 'metadata': {'labels': {'name': 'frontend'}, - 'name': 'frontend', - 'resourceversion': 'v1'}, - 'spec': {'ports': [{'port': 80, - 'protocol': 'TCP', - 'targetPort': 80}], - 'selector': {'name': 'frontend'}}} - - resp = self.k8s_api.create_namespaced_service(body=service_manifest, - namespace='default') - self.assertEqual('frontend', resp.metadata.name) - self.assertTrue(resp.status) - - resp = self.k8s_api.read_namespaced_service(name='frontend', - namespace='default') - self.assertEqual('frontend', resp.metadata.name) - self.assertTrue(resp.status) - - resp = 
self.k8s_api.delete_namespaced_service(name='frontend', - namespace='default') - - def test_replication_controller_apis(self): - rc_manifest = { - 'apiVersion': 'v1', - 'kind': 'ReplicationController', - 'metadata': {'labels': {'name': 'frontend'}, - 'name': 'frontend'}, - 'spec': {'replicas': 2, - 'selector': {'name': 'frontend'}, - 'template': {'metadata': { - 'labels': {'name': 'frontend'}}, - 'spec': {'containers': [{ - 'image': 'nginx', - 'name': 'nginx', - 'ports': [{'containerPort': 80, - 'protocol': 'TCP'}]}]}}}} - - resp = self.k8s_api.create_namespaced_replication_controller( - body=rc_manifest, namespace='default') - self.assertEqual('frontend', resp.metadata.name) - self.assertEqual(2, resp.spec.replicas) - - resp = self.k8s_api.read_namespaced_replication_controller( - name='frontend', namespace='default') - self.assertEqual('frontend', resp.metadata.name) - self.assertEqual(2, resp.spec.replicas) - - resp = self.k8s_api.delete_namespaced_replication_controller( - name='frontend', body={}, namespace='default') diff --git a/magnum/tests/functional/swarm/__init__.py b/magnum/tests/functional/swarm/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/functional/swarm/test_swarm_python_client.py b/magnum/tests/functional/swarm/test_swarm_python_client.py deleted file mode 100644 index c8b8b0ee..00000000 --- a/magnum/tests/functional/swarm/test_swarm_python_client.py +++ /dev/null @@ -1,149 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -from docker import errors -from requests import exceptions as req_exceptions - -from magnum.common import docker_utils -import magnum.conf -from magnum.tests.functional.python_client_base import ClusterTest - - -CONF = magnum.conf.CONF - - -class TestSwarmAPIs(ClusterTest): - """This class will cover swarm cluster basic functional testing. - - Will test all kinds of container action with tls_disabled=False mode. - """ - - coe = "swarm" - cluster_template_kwargs = { - "tls_disabled": False, - "network_driver": None, - "volume_driver": None, - "labels": {} - } - - @classmethod - def setUpClass(cls): - super(TestSwarmAPIs, cls).setUpClass() - cls.cluster_is_ready = None - - def setUp(self): - super(TestSwarmAPIs, self).setUp() - if self.cluster_is_ready is True: - return - # Note(eliqiao): In our test cases, docker client or magnum client will - # try to connect to swarm service which is running on master node, - # the endpoint is cluster.api_address(listen port is included), but the - # service is not ready right after the cluster was created, sleep for - # an acceptable time to wait for service being started. - # This is required, without this any api call will fail as - # 'ConnectionError: [Errno 111] Connection refused'. - msg = ("If you see this error in the functional test, it means " - "the docker service took too long to come up. 
This may not " - "be an actual error, so an option is to rerun the " - "functional test.") - if self.cluster_is_ready is False: - # In such case, no need to test below cases on gate, raise a - # meanful exception message to indicate ca setup failed after - # cluster creation, better to do a `recheck` - # We don't need to test since cluster is not ready. - raise Exception(msg) - - url = self.cs.clusters.get(self.cluster.uuid).api_address - # FIXME (strigazi) until we upgrade to docker-py 1.8.0 use - # only the https protocol instead of tcp. - https_url = url.replace('tcp', 'https') - - # Note(eliqiao): docker_utils.CONF.docker.default_timeout is 10, - # tested this default configure option not works on gate, it will - # cause container creation failed due to time out. - # Debug more found that we need to pull image when the first time to - # create a container, set it as 180s. - - docker_api_time_out = 180 - self.docker_client = docker_utils.DockerHTTPClient( - https_url, - CONF.docker.docker_remote_api_version, - docker_api_time_out, - client_key=self.key_file, - client_cert=self.cert_file, - ca_cert=self.ca_file) - - self.docker_client_non_tls = docker_utils.DockerHTTPClient( - https_url, - CONF.docker.docker_remote_api_version, - docker_api_time_out) - - def _container_operation(self, func, *args, **kwargs): - # NOTE(hongbin): Swarm cluster occasionally aborts the connection, - # so we re-try the operation several times here. In long-term, we - # need to investigate the cause of this issue. See bug #1583337. - for i in range(150): - try: - self.LOG.info("Calling function " + func.__name__) - return func(*args, **kwargs) - except req_exceptions.ConnectionError: - self.LOG.info("Connection aborted on calling Swarm API. " - "Will retry in 2 seconds.") - except errors.APIError as e: - if e.response.status_code != 500: - raise - self.LOG.info("Internal Server Error: " + str(e)) - time.sleep(2) - - raise Exception("Cannot connect to Swarm API.") - - def _create_container(self, **kwargs): - image = kwargs.get('image', 'docker.io/cirros') - command = kwargs.get('command', 'ping -c 1000 8.8.8.8') - return self._container_operation(self.docker_client.create_container, - image=image, command=command) - - def test_start_stop_container_from_api(self): - # Leverage docker client to create a container on the cluster we - # created, and try to start and stop it then delete it. 
- - resp = self._create_container(image="docker.io/cirros", - command="ping -c 1000 8.8.8.8") - - resp = self._container_operation(self.docker_client.containers, - all=True) - container_id = resp[0].get('Id') - self._container_operation(self.docker_client.start, - container=container_id) - - resp = self._container_operation(self.docker_client.containers) - self.assertEqual(1, len(resp)) - resp = self._container_operation(self.docker_client.inspect_container, - container=container_id) - self.assertTrue(resp['State']['Running']) - - self._container_operation(self.docker_client.stop, - container=container_id) - resp = self._container_operation(self.docker_client.inspect_container, - container=container_id) - self.assertEqual(False, resp['State']['Running']) - - self._container_operation(self.docker_client.remove_container, - container=container_id) - resp = self._container_operation(self.docker_client.containers) - self.assertEqual([], resp) - - def test_access_with_non_tls_client(self): - self.assertRaises(req_exceptions.SSLError, - self.docker_client_non_tls.containers) diff --git a/magnum/tests/functional/swarm_mode/__init__.py b/magnum/tests/functional/swarm_mode/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/functional/swarm_mode/test_swarm_mode_python_client.py b/magnum/tests/functional/swarm_mode/test_swarm_mode_python_client.py deleted file mode 100644 index 32c0abad..00000000 --- a/magnum/tests/functional/swarm_mode/test_swarm_mode_python_client.py +++ /dev/null @@ -1,125 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import docker -import requests -import time - -import magnum.conf -from magnum.tests.functional.python_client_base import ClusterTest - - -CONF = magnum.conf.CONF - - -class TestSwarmModeAPIs(ClusterTest): - """This class will cover swarm cluster basic functional testing. - - Will test all kinds of container action with tls_disabled=False mode. - """ - - coe = "swarm-mode" - cluster_template_kwargs = { - "tls_disabled": False, - "network_driver": None, - "volume_driver": None, - "labels": {} - } - - @classmethod - def setUpClass(cls): - super(TestSwarmModeAPIs, cls).setUpClass() - cls.cluster_is_ready = None - - def setUp(self): - super(TestSwarmModeAPIs, self).setUp() - if self.cluster_is_ready is True: - return - # Note(eliqiao): In our test cases, docker client or magnum client will - # try to connect to swarm service which is running on master node, - # the endpoint is cluster.api_address(listen port is included), but the - # service is not ready right after the cluster was created, sleep for - # an acceptable time to wait for service being started. - # This is required, without this any api call will fail as - # 'ConnectionError: [Errno 111] Connection refused'. - msg = ("If you see this error in the functional test, it means " - "the docker service took too long to come up. 
This may not " - "be an actual error, so an option is to rerun the " - "functional test.") - if self.cluster_is_ready is False: - # In such case, no need to test below cases on gate, raise a - # meanful exception message to indicate ca setup failed after - # cluster creation, better to do a `recheck` - # We don't need to test since cluster is not ready. - raise Exception(msg) - - url = self.cs.clusters.get(self.cluster.uuid).api_address - - # Note(eliqiao): docker_utils.CONF.docker.default_timeout is 10, - # tested this default configure option not works on gate, it will - # cause container creation failed due to time out. - # Debug more found that we need to pull image when the first time to - # create a container, set it as 180s. - - docker_api_time_out = 180 - tls_config = docker.tls.TLSConfig( - client_cert=(self.cert_file, self.key_file), - verify=self.ca_file - ) - - self.docker_client = docker.DockerClient( - base_url=url, - tls=tls_config, - version='auto', - timeout=docker_api_time_out) - - self.docker_client_non_tls = docker.DockerClient( - base_url=url, - version='1.21', - timeout=docker_api_time_out) - - def test_create_remove_service(self): - # Create and remove a service using docker python SDK. - # Wait 15 mins until reach running and 5 mins until the service - # is removed. - - # Create a nginx service based on alpine linux - service = self.docker_client.services.create( - name='nginx', - image='nginx:mainline-alpine') - # wait for 15 mins to be running - for i in range(90): - if service.tasks()[0]['Status']['State'] == "running": - break - time.sleep(10) - # Verify that it is running - self.assertEqual('running', service.tasks()[0]['Status']['State']) - # Remove the service and wait for 5 mins untils it is removed - service.remove() - for i in range(30): - if self.docker_client.services.list() == []: - break - time.sleep(10) - # Verify that it is deleted - self.assertEqual([], self.docker_client.services.list()) - - def test_access_with_non_tls_client(self): - """Try to contact master's docker using the tcp protocol. - - tcp returns ConnectionError whereas https returns SSLError. The - default protocol we use in magnum is tcp which works fine docker - python SDK docker>=2.0.0. - """ - try: - self.docker_client_non_tls.info() - except requests.exceptions.ConnectionError: - pass diff --git a/magnum/tests/functional/tempest_tests/__init__.py b/magnum/tests/functional/tempest_tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/functional/tempest_tests/config.py b/magnum/tests/functional/tempest_tests/config.py deleted file mode 100644 index f7244951..00000000 --- a/magnum/tests/functional/tempest_tests/config.py +++ /dev/null @@ -1,67 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from __future__ import print_function - -from oslo_config import cfg -from tempest import config # noqa - - -service_available_group = cfg.OptGroup(name="service_available", - title="Available OpenStack Services") - -ServiceAvailableGroup = [ - cfg.BoolOpt("magnum", - default=True, - help="Whether or not magnum is expected to be available"), -] - -magnum_group = cfg.OptGroup(name="magnum", title="Magnum Options") - -MagnumGroup = [ - cfg.StrOpt("image_id", - default="fedora-atomic-latest", - help="Image id to be used for ClusterTemplate."), - - cfg.StrOpt("nic_id", - default="public", - help="NIC id."), - - cfg.StrOpt("keypair_id", - default="default", - help="Keypair id to use to log into nova instances."), - - cfg.StrOpt("flavor_id", - default="s1.magnum", - help="Flavor id to use for ClusterTemplate."), - - cfg.StrOpt("magnum_url", - help="Bypass URL for Magnum to skip service catalog lookup"), - - cfg.StrOpt("master_flavor_id", - default="m1.magnum", - help="Master flavor id to use for ClusterTemplate."), - - cfg.StrOpt("csr_location", - default="/opt/stack/new/magnum/default.csr", - deprecated_for_removal=True, - help="CSR location for certificates. This option is no " - "longer used for anything."), - - cfg.StrOpt("dns_nameserver", - default="8.8.8.8", - help="DNS nameserver to use for ClusterTemplate."), - - cfg.BoolOpt("copy_logs", - default=True, - help="Specify whether to copy nova server logs on failure."), -] diff --git a/magnum/tests/functional/tempest_tests/plugin.py b/magnum/tests/functional/tempest_tests/plugin.py deleted file mode 100644 index 923f60b8..00000000 --- a/magnum/tests/functional/tempest_tests/plugin.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import os - -from tempest import config -from tempest.test_discover import plugins - -import magnum -from magnum.tests.functional.tempest_tests import config as magnum_config - - -class MagnumTempestPlugin(plugins.TempestPlugin): - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(magnum.__file__)))[0] - test_dir = "magnum/tests/functional/api/v1" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - config.register_opt_group( - conf, magnum_config.service_available_group, - magnum_config.ServiceAvailableGroup) - config.register_opt_group(conf, magnum_config.magnum_group, - magnum_config.MagnumGroup) - - def get_opt_lists(self): - return [ - (magnum_config.magnum_group.name, magnum_config.MagnumGroup), - ('service_available', magnum_config.ServiceAvailableGroup) - ] diff --git a/magnum/tests/policy_fixture.py b/magnum/tests/policy_fixture.py deleted file mode 100644 index 28e1bb1b..00000000 --- a/magnum/tests/policy_fixture.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import fixtures -from oslo_config import cfg -from oslo_policy import _parser -from oslo_policy import opts as policy_opts - -from magnum.common import policy as magnum_policy -from magnum.tests import fake_policy - -CONF = cfg.CONF - - -class PolicyFixture(fixtures.Fixture): - - def _setUp(self): - self.policy_dir = self.useFixture(fixtures.TempDir()) - self.policy_file_name = os.path.join(self.policy_dir.path, - 'policy.json') - with open(self.policy_file_name, 'w') as policy_file: - policy_file.write(fake_policy.policy_data) - policy_opts.set_defaults(CONF) - CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy') - magnum_policy._ENFORCER = None - self.addCleanup(magnum_policy.init().clear) - - def set_rules(self, rules): - policy = magnum_policy._ENFORCER - policy.set_rules({k: _parser.parse_rule(v) - for k, v in rules.items()}) diff --git a/magnum/tests/unit/__init__.py b/magnum/tests/unit/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/api/__init__.py b/magnum/tests/unit/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/api/base.py b/magnum/tests/unit/api/base.py deleted file mode 100644 index 4d02ba45..00000000 --- a/magnum/tests/unit/api/base.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE: Ported from ceilometer/tests/api.py (subsequently moved to -# ceilometer/tests/api/__init__.py). This should be oslo'ified: -# https://bugs.launchpad.net/ironic/+bug/1255115. - -# NOTE(deva): import auth_token so we can override a config option -from keystonemiddleware import auth_token # noqa -import mock -from oslo_config import cfg -import pecan -import pecan.testing -from six.moves.urllib import parse as urlparse - -from magnum.api import hooks -from magnum.tests.unit.db import base - -PATH_PREFIX = '/v1' - - -class FunctionalTest(base.DbTestCase): - """Base class for API tests. - - Pecan controller test. Used for functional tests of Pecan controllers where - you need to test your literal application and its integration with the - framework. 
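A sketch of how a unit test might consume the PolicyFixture above (the test class, base class and rule name are illustrative):

class TestClusterPolicy(base.DbTestCase):  # base class name is assumed

    def setUp(self):
        super(TestClusterPolicy, self).setUp()
        self.policy = self.useFixture(PolicyFixture())

    def test_create_denied(self):
        # '!' denies the rule unconditionally; enforcement should now fail.
        self.policy.set_rules({'cluster:create': '!'})
        # ... exercise the API here and assert the 403 response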
- """ - - def setUp(self): - super(FunctionalTest, self).setUp() - cfg.CONF.set_override("auth_version", "v2.0", - group='keystone_authtoken') - cfg.CONF.set_override("admin_user", "admin", - group='keystone_authtoken') - - # Determine where we are so we can set up paths in the config - self.config = { - 'app': { - 'root': 'magnum.api.controllers.root.RootController', - 'modules': ['magnum.api'], - 'hooks': [ - hooks.ContextHook(), - hooks.RPCHook(), - hooks.NoExceptionTracebackHook(), - ], - }, - } - - self.app = self._make_app() - - def reset_pecan(): - pecan.set_config({}, overwrite=True) - - self.addCleanup(reset_pecan) - - p = mock.patch('magnum.api.controllers.v1.Controller._check_version') - self._check_version = p.start() - self.addCleanup(p.stop) - - def _verify_attrs(self, attrs, response, positive=True): - if positive is True: - verify_method = self.assertIn - else: - verify_method = self.assertNotIn - for attr in attrs: - verify_method(attr, response) - - def _make_app(self, config=None): - if not config: - config = self.config - - return pecan.testing.load_test_app(config) - - def _request_json(self, path, params, expect_errors=False, headers=None, - method="post", extra_environ=None, status=None, - path_prefix=PATH_PREFIX): - """Sends simulated HTTP request to Pecan test app. - - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param method: Request method type. Appropriate method function call - should be used rather than passing attribute in. - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - :param path_prefix: prefix of the url path - """ - full_path = path_prefix + path - print('%s: %s %s' % (method.upper(), full_path, params)) - response = getattr(self.app, "%s_json" % method)( - str(full_path), - params=params, - headers=headers, - status=status, - extra_environ=extra_environ, - expect_errors=expect_errors - ) - print('GOT:%s' % response) - return response - - def put_json(self, path, params, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP PUT request to Pecan test app. - - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - """ - return self._request_json(path=path, params=params, - expect_errors=expect_errors, - headers=headers, extra_environ=extra_environ, - status=status, method="put") - - def post_json(self, path, params, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP POST request to Pecan test app. 
- - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - """ - return self._request_json(path=path, params=params, - expect_errors=expect_errors, - headers=headers, extra_environ=extra_environ, - status=status, method="post") - - def patch_json(self, path, params, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP PATCH request to Pecan test app. - - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - """ - return self._request_json(path=path, params=params, - expect_errors=expect_errors, - headers=headers, extra_environ=extra_environ, - status=status, method="patch") - - def delete(self, path, expect_errors=False, headers=None, - extra_environ=None, status=None, path_prefix=PATH_PREFIX): - """Sends simulated HTTP DELETE request to Pecan test app. - - :param path: url path of target service - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - :param path_prefix: prefix of the url path - """ - full_path = path_prefix + path - print('DELETE: %s' % (full_path)) - response = self.app.delete(str(full_path), - headers=headers, - status=status, - extra_environ=extra_environ, - expect_errors=expect_errors) - print('GOT:%s' % response) - return response - - def get_json(self, path, expect_errors=False, headers=None, - extra_environ=None, q=None, path_prefix=PATH_PREFIX, - **params): - """Sends simulated HTTP GET request to Pecan test app. 
-
-        :param path: url path of target service
-        :param expect_errors: Boolean value; whether an error is expected
-                              based on request
-        :param headers: a dictionary of headers to send along with the request
-        :param extra_environ: a dictionary of environ variables to send along
-                              with the request
-        :param q: list of queries consisting of: field, value, op, and type
-                  keys
-        :param path_prefix: prefix of the url path
-        :param params: content for wsgi.input of request
-        """
-        if q is None:
-            q = []
-        full_path = path_prefix + path
-        query_params = {'q.field': [],
-                        'q.value': [],
-                        'q.op': [],
-                        }
-        for query in q:
-            for name in ['field', 'op', 'value']:
-                query_params['q.%s' % name].append(query.get(name, ''))
-        all_params = {}
-        all_params.update(params)
-        if q:
-            all_params.update(query_params)
-        print('GET: %s %r' % (full_path, all_params))
-        response = self.app.get(full_path,
-                                params=all_params,
-                                headers=headers,
-                                extra_environ=extra_environ,
-                                expect_errors=expect_errors)
-        if not expect_errors:
-            response = response.json
-        print('GOT:%s' % response)
-        return response
-
-    def validate_link(self, link, bookmark=False):
-        """Checks if the given link can get correct data."""
-        # removes the scheme and net location parts of the link
-        url_parts = list(urlparse.urlparse(link))
-        url_parts[0] = url_parts[1] = ''
-
-        # bookmark link should not have the version in the URL
-        if bookmark and url_parts[2].startswith(PATH_PREFIX):
-            return False
-
-        full_path = urlparse.urlunparse(url_parts)
-        try:
-            self.get_json(full_path, path_prefix='')
-            return True
-        except Exception:
-            return False
diff --git a/magnum/tests/unit/api/controllers/__init__.py b/magnum/tests/unit/api/controllers/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/magnum/tests/unit/api/controllers/auth-paste.ini b/magnum/tests/unit/api/controllers/auth-paste.ini
deleted file mode 100644
index 34229a4d..00000000
--- a/magnum/tests/unit/api/controllers/auth-paste.ini
+++ /dev/null
@@ -1,23 +0,0 @@
-[pipeline:main]
-pipeline = cors healthcheck request_id authtoken api_v1
-
-[app:api_v1]
-paste.app_factory = magnum.api.app:app_factory
-
-[filter:authtoken]
-paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = magnum
-latent_allow_methods = GET, PUT, POST, DELETE, PATCH
-latent_allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID
-latent_expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID
-
-[filter:healthcheck]
-paste.filter_factory = oslo_middleware:Healthcheck.factory
-backends = disable_by_file
-disable_by_file_path = /tmp/magnum_healthcheck_disable
diff --git a/magnum/tests/unit/api/controllers/auth-root-access.ini b/magnum/tests/unit/api/controllers/auth-root-access.ini
deleted file mode 100644
index 8abdf3c9..00000000
--- a/magnum/tests/unit/api/controllers/auth-root-access.ini
+++ /dev/null
@@ -1,24 +0,0 @@
-[pipeline:main]
-pipeline = cors healthcheck request_id authtoken api_v1
-
-[app:api_v1]
-paste.app_factory = magnum.api.app:app_factory
-
-[filter:authtoken]
-acl_public_routes = /
-paste.filter_factory =
oslo_middleware.cors:filter_factory -oslo_config_project = magnum -latent_allow_methods = GET, PUT, POST, DELETE, PATCH -latent_allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID -latent_expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID - -[filter:healthcheck] -paste.filter_factory = oslo_middleware:Healthcheck.factory -backends = disable_by_file -disable_by_file_path = /tmp/magnum_healthcheck_disable diff --git a/magnum/tests/unit/api/controllers/auth-v1-access.ini b/magnum/tests/unit/api/controllers/auth-v1-access.ini deleted file mode 100644 index 64eb1820..00000000 --- a/magnum/tests/unit/api/controllers/auth-v1-access.ini +++ /dev/null @@ -1,24 +0,0 @@ -[pipeline:main] -pipeline = cors healthcheck request_id authtoken api_v1 - -[app:api_v1] -paste.app_factory = magnum.api.app:app_factory - -[filter:authtoken] -acl_public_routes = /v1 -paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory - -[filter:request_id] -paste.filter_factory = oslo_middleware:RequestId.factory - -[filter:cors] -paste.filter_factory = oslo_middleware.cors:filter_factory -oslo_config_project = magnum -latent_allow_methods = GET, PUT, POST, DELETE, PATCH -latent_allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID -latent_expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID - -[filter:healthcheck] -paste.filter_factory = oslo_middleware:Healthcheck.factory -backends = disable_by_file -disable_by_file_path = /tmp/magnum_healthcheck_disable diff --git a/magnum/tests/unit/api/controllers/noauth-paste.ini b/magnum/tests/unit/api/controllers/noauth-paste.ini deleted file mode 100644 index d7a74d2f..00000000 --- a/magnum/tests/unit/api/controllers/noauth-paste.ini +++ /dev/null @@ -1,19 +0,0 @@ -[pipeline:main] -pipeline = cors request_id api_v1 - -[app:api_v1] -paste.app_factory = magnum.api.app:app_factory - -[filter:authtoken] -acl_public_routes = / -paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory - -[filter:request_id] -paste.filter_factory = oslo_middleware:RequestId.factory - -[filter:cors] -paste.filter_factory = oslo_middleware.cors:filter_factory -oslo_config_project = magnum -latent_allow_methods = GET, PUT, POST, DELETE, PATCH -latent_allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID -latent_expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID diff --git a/magnum/tests/unit/api/controllers/test_base.py b/magnum/tests/unit/api/controllers/test_base.py deleted file mode 100644 index 9c565cb8..00000000 --- a/magnum/tests/unit/api/controllers/test_base.py +++ /dev/null @@ -1,409 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock -from webob import exc - -from magnum.api.controllers import base -from magnum.api.controllers import versions -from magnum.api import versioned_method -from magnum.tests import base as test_base - - -class TestVersion(test_base.TestCase): - def setUp(self): - super(TestVersion, self).setUp() - self.a = versions.Version( - {versions.Version.string: "container-infra 2.0"}, - "container-infra 2.0", "container-infra 2.1") - self.b = versions.Version( - {versions.Version.string: "container-infra 2.0"}, - "container-infra 2.0", "container-infra 2.1") - self.c = versions.Version( - {versions.Version.string: "container-infra 2.2"}, - "container-infra 2.0", "container-infra 2.2") - - def test_is_null_true(self): - self.a.major = 0 - self.a.minor = 0 - self.assertEqual(0 == 0, self.a.is_null()) - - def test_is_null_false(self): - self.assertEqual(2 == 0, self.a.is_null()) - - def test__eq__with_equal(self): - self.assertEqual(2 == 2, self.a == self.b) - - def test__eq__with_unequal(self): - self.a.major = 1 - self.assertEqual(1 == 2, self.a == self.b) - - def test__ne__with_equal(self): - self.assertEqual(2 != 2, self.a != self.b) - - def test__ne__with_unequal(self): - self.a.major = 1 - self.assertEqual(1 != 2, self.a != self.b) - - def test__lt__with_higher_major_version(self): - self.a.major = 2 - self.b.major = 1 - - self.assertEqual(2 < 1, self.a < self.b) - - def test__lt__with_lower_major_version(self): - self.a.major = 1 - self.b.major = 2 - - self.assertEqual(1 < 2, self.a < self.b) - - def test__lt__with_higher_minor_version(self): - self.a.minor = 2 - self.b.minor = 1 - - self.assertEqual(self.a.major, self.b.major) - self.assertEqual(2 < 1, self.a < self.b) - - def test__lt__with_lower_minor_version(self): - self.a.minor = 1 - self.b.minor = 2 - - self.assertEqual(self.a.major, self.b.major) - self.assertEqual(1 < 2, self.a < self.b) - - def test__gt__with_higher_major_version(self): - self.a.major = 2 - self.b.major = 1 - - self.assertEqual(2 > 1, self.a > self.b) - - def test__gt__with_lower_major_version(self): - self.a.major = 1 - self.b.major = 2 - - self.assertEqual(1 > 2, self.a > self.b) - - def test__gt__with_higher_minor_version(self): - self.a.minor = 2 - self.b.minor = 1 - - self.assertEqual(self.a.major, self.b.major) - self.assertEqual(2 > 1, self.a > self.b) - - def test__gt__with_lower_minor_version(self): - self.a.minor = 1 - self.b.minor = 2 - - self.assertEqual(self.a.major, self.b.major) - self.assertEqual(1 > 2, self.a > self.b) - - def test__le__with_equal(self): - self.assertEqual(2 == 2, self.a <= self.b) - - def test__le__with_higher_version(self): - self.a.major = 3 - self.assertEqual(3 <= 2, self.a <= self.b) - - def test__le__with_lower_version(self): - self.a.major = 1 - self.assertEqual(1 <= 2, self.a <= self.b) - - def test__ge__with_equal(self): - self.assertEqual(2 >= 2, self.a >= self.b) - - def test__ge__with_higher_version(self): - self.a.major = 3 - self.assertEqual(3 >= 2, self.a >= self.b) - - def test__ge__with_lower_version(self): - self.a.major = 1 - self.assertEqual(1 >= 2, self.a >= self.b) - - def test_matches_start_version(self): - self.assertEqual(0 >= 0, self.a.matches(self.b, self.c)) - - def test_matches_end_version(self): - self.a.minor = 2 - self.assertEqual(2 <= 2, self.a.matches(self.b, self.c)) - - def test_matches_valid_version(self): - self.a.minor = 1 - self.assertEqual(0 <= 1 <= 2, self.a.matches(self.b, self.c)) - - def test_matches_version_too_high(self): - self.a.minor = 3 - self.assertEqual(0 <= 3 <= 2, 
self.a.matches(self.b, self.c)) - - def test_matches_version_too_low(self): - self.a.major = 1 - self.assertEqual(2 <= 1 <= 2, self.a.matches(self.b, self.c)) - - def test_matches_null_version(self): - self.a.major = 0 - self.a.minor = 0 - self.assertRaises(ValueError, self.a.matches, self.b, self.c) - - @mock.patch('magnum.api.controllers.versions.Version.parse_headers') - def test_init(self, mock_parse): - a = mock.Mock() - b = mock.Mock() - mock_parse.return_value = (a, b) - v = versions.Version('test', 'foo', 'bar') - - mock_parse.assert_called_with('test', 'foo', 'bar') - self.assertEqual(a, v.major) - self.assertEqual(b, v.minor) - - @mock.patch('magnum.api.controllers.versions.Version.parse_headers') - def test_repr(self, mock_parse): - mock_parse.return_value = (123, 456) - v = versions.Version('test', mock.ANY, mock.ANY) - result = "%s" % v - self.assertEqual('123.456', result) - - @mock.patch('magnum.api.controllers.versions.Version.parse_headers') - def test_repr_with_strings(self, mock_parse): - mock_parse.return_value = ('abc', 'def') - v = versions.Version('test', mock.ANY, mock.ANY) - result = "%s" % v - self.assertEqual('abc.def', result) - - def test_parse_headers_ok(self): - version = versions.Version.parse_headers( - {versions.Version.string: 'container-infra 123.456'}, - mock.ANY, mock.ANY) - self.assertEqual((123, 456), version) - - def test_parse_headers_latest(self): - for s in ['magnum latest', 'magnum LATEST']: - version = versions.Version.parse_headers( - {versions.Version.string: s}, mock.ANY, 'container-infra 1.9') - self.assertEqual((1, 9), version) - - def test_parse_headers_bad_length(self): - self.assertRaises( - exc.HTTPNotAcceptable, - versions.Version.parse_headers, - {versions.Version.string: 'container-infra 1'}, - mock.ANY, - mock.ANY) - self.assertRaises( - exc.HTTPNotAcceptable, - versions.Version.parse_headers, - {versions.Version.string: 'container-infra 1.2.3'}, - mock.ANY, - mock.ANY) - - def test_parse_no_header(self): - # this asserts that the minimum version string is applied - version = versions.Version.parse_headers({}, 'container-infra 1.1', - 'container-infra 1.5') - self.assertEqual((1, 1), version) - - def test_parse_incorrect_service_type(self): - self.assertRaises( - exc.HTTPNotAcceptable, - versions.Version.parse_headers, - {versions.Version.string: '1.1'}, - 'container-infra 1.1', - 'container-infra 1.1') - self.assertRaises( - exc.HTTPNotAcceptable, - versions.Version.parse_headers, - {versions.Version.string: 'nova 1.1'}, - 'container-infra 1.1', - 'container-infra 1.1') - - -class TestController(test_base.TestCase): - def test_check_for_versions_intersection_negative(self): - func_list = \ - [versioned_method.VersionedMethod('foo', - versions.Version('', '', '', - '2.1'), - versions.Version('', '', '', - '2.4'), - None), - versioned_method.VersionedMethod('foo', - versions.Version('', '', '', - '2.11'), - versions.Version('', '', '', - '3.1'), - None), - versioned_method.VersionedMethod('foo', - versions.Version('', '', '', - '2.8'), - versions.Version('', '', '', - '2.9'), - None), - ] - - result = base.Controller.check_for_versions_intersection( - func_list=func_list) - self.assertFalse(result) - - func_list = \ - [versioned_method.VersionedMethod('foo', - versions.Version('', '', '', - '2.12'), - versions.Version('', '', '', - '2.14'), - None), - versioned_method.VersionedMethod('foo', - versions.Version('', '', '', - '3.0'), - versions.Version('', '', '', - '3.4'), - None) - ] - - result = 
base.Controller.check_for_versions_intersection( - func_list=func_list) - self.assertFalse(result) - - def test_check_for_versions_intersection_positive(self): - func_list = \ - [versioned_method.VersionedMethod('foo', - versions.Version('', '', '', - '2.1'), - versions.Version('', '', '', - '2.4'), - None), - versioned_method.VersionedMethod('foo', - versions.Version('', '', '', - '2.3'), - versions.Version('', '', '', - '3.1'), - None), - versioned_method.VersionedMethod('foo', - versions.Version('', '', '', - '2.9'), - versions.Version('', '', '', - '3.4'), - None) - ] - - result = base.Controller.check_for_versions_intersection( - func_list=func_list) - self.assertTrue(result) - - def test_check_for_versions_intersection_shared_start_end(self): - func_list = \ - [versioned_method.VersionedMethod('foo', - versions.Version('', '', '', - '1.1'), - versions.Version('', '', '', - '1.1'), - None), - versioned_method.VersionedMethod('foo', - versions.Version('', '', '', - '1.1'), - versions.Version('', '', '', - '1.2'), - None) - ] - - result = base.Controller.check_for_versions_intersection( - func_list=func_list) - self.assertTrue(result) - - def test_api_version_decorator(self): - - class MyController(base.Controller): - @base.Controller.api_version('1.0', '1.1') - def testapi1(self): - return 'API1_1.0_1.1' - - @base.Controller.api_version('1.2', '1.3') # noqa - def testapi1(self): - return 'API1_1.2_1.3' - - @base.Controller.api_version('2.1', '2.2') - def testapi2(self): - return 'API2_2.1_2.2' - - @base.Controller.api_version('1.0', '2.0') # noqa - def testapi2(self): - return 'API2_1.0_2.0' - - controller = MyController() - # verify list was added to controller - self.assertIsNotNone(controller.versioned_methods) - - api1_list = controller.versioned_methods['testapi1'] - api2_list = controller.versioned_methods['testapi2'] - - # verify versioned_methods reordered correctly - self.assertEqual('1.2', str(api1_list[0].start_version)) - self.assertEqual('1.3', str(api1_list[0].end_version)) - self.assertEqual('1.0', str(api1_list[1].start_version)) - self.assertEqual('1.1', str(api1_list[1].end_version)) - - # verify stored methods can be called - result = api1_list[0].func(controller) - self.assertEqual('API1_1.2_1.3', result) - result = api1_list[1].func(controller) - self.assertEqual('API1_1.0_1.1', result) - - # verify versioned_methods reordered correctly - self.assertEqual('2.1', str(api2_list[0].start_version)) - self.assertEqual('2.2', str(api2_list[0].end_version)) - self.assertEqual('1.0', str(api2_list[1].start_version)) - self.assertEqual('2.0', str(api2_list[1].end_version)) - - # Verify stored methods can be called - result = api2_list[0].func(controller) - self.assertEqual('API2_2.1_2.2', result) - result = api2_list[1].func(controller) - self.assertEqual('API2_1.0_2.0', result) - - @mock.patch('pecan.request') - def test_controller_get_attribute(self, mock_pecan_request): - - class MyController(base.Controller): - @base.Controller.api_version('1.0', '1.1') - def testapi1(self): - return 'API1_1.0_1.1' - - @base.Controller.api_version('1.2', '1.3') # noqa - def testapi1(self): - return 'API1_1.2_1.3' - - controller = MyController() - mock_pecan_request.version = versions.Version("", "", - "", "1.2") - controller.request = mock_pecan_request - - method = controller.__getattribute__('testapi1') - result = method() - self.assertEqual('API1_1.2_1.3', result) - - @mock.patch('pecan.request') - def test_controller_get_attr_version_not_found(self, - mock_pecan_request): - - class 
MyController(base.Controller): - @base.Controller.api_version('1.0', '1.1') - def testapi1(self): - return 'API1_1.0_1.1' - - @base.Controller.api_version('1.3', '1.4') # noqa - def testapi1(self): - return 'API1_1.3_1.4' - - controller = MyController() - mock_pecan_request.version = versions.Version("", "", - "", "1.2") - controller.request = mock_pecan_request - - self.assertRaises(exc.HTTPNotAcceptable, - controller.__getattribute__, 'testapi1') diff --git a/magnum/tests/unit/api/controllers/test_root.py b/magnum/tests/unit/api/controllers/test_root.py deleted file mode 100644 index 789e1041..00000000 --- a/magnum/tests/unit/api/controllers/test_root.py +++ /dev/null @@ -1,262 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import fixtures -import mock -from oslo_config import cfg -from webob import exc as webob_exc - -try: - import configparser as ConfigParser -except ImportError: - import ConfigParser -import shutil -import webtest - -from magnum.api import app -from magnum.api.controllers import v1 as v1_api -from magnum.tests import base as test_base -from magnum.tests.unit.api import base as api_base - - -class TestRootController(api_base.FunctionalTest): - def setUp(self): - super(TestRootController, self).setUp() - self.root_expected = { - u'description': u'Magnum is an OpenStack project which ' - 'aims to provide container cluster management.', - u'name': u'OpenStack Magnum API', - u'versions': [{u'id': u'v1', - u'links': - [{u'href': u'http://localhost/v1/', - u'rel': u'self'}], - u'status': u'CURRENT', - u'max_version': u'1.6', - u'min_version': u'1.1'}]} - - self.v1_expected = { - u'media_types': - [{u'base': u'application/json', - u'type': u'application/vnd.openstack.magnum.v1+json'}], - u'links': [{u'href': u'http://localhost/v1/', - u'rel': u'self'}, - {u'href': - u'http://docs.openstack.org/developer' - '/magnum/dev/api-spec-v1.html', - u'type': u'text/html', u'rel': u'describedby'}], - u'stats': [{u'href': u'http://localhost/v1/stats/', - u'rel': u'self'}, - {u'href': u'http://localhost/stats/', - u'rel': u'bookmark'}], - u'bays': [{u'href': u'http://localhost/v1/bays/', - u'rel': u'self'}, - {u'href': u'http://localhost/bays/', - u'rel': u'bookmark'}], - u'baymodels': [{u'href': u'http://localhost/v1/baymodels/', - u'rel': u'self'}, - {u'href': u'http://localhost/baymodels/', - u'rel': u'bookmark'}], - u'clusters': [{u'href': u'http://localhost/v1/clusters/', - u'rel': u'self'}, - {u'href': u'http://localhost/clusters/', - u'rel': u'bookmark'}], - u'quotas': [{u'href': u'http://localhost/v1/quotas/', - u'rel': u'self'}, - {u'href': u'http://localhost/quotas/', - u'rel': u'bookmark'}], - u'clustertemplates': - [{u'href': u'http://localhost/v1/clustertemplates/', - u'rel': u'self'}, - {u'href': u'http://localhost/clustertemplates/', - u'rel': u'bookmark'}], - u'id': u'v1', - u'certificates': [{u'href': u'http://localhost/v1/certificates/', - u'rel': u'self'}, - {u'href': u'http://localhost/certificates/', - u'rel': u'bookmark'}], - u'mservices': [{u'href': 
u'http://localhost/v1/mservices/',
-                            u'rel': u'self'},
-                           {u'href': u'http://localhost/mservices/',
-                            u'rel': u'bookmark'}]}
-
-    def make_app(self, paste_file):
-        file_name = self.get_path(paste_file)
-        cfg.CONF.set_override("api_paste_config", file_name, group="api")
-        return webtest.TestApp(app.load_app())
-
-    def test_version(self):
-        response = self.app.get('/')
-        self.assertEqual(self.root_expected, response.json)
-
-    def test_v1_controller(self):
-        response = self.app.get('/v1/')
-        self.assertEqual(self.v1_expected, response.json)
-
-    def test_get_not_found(self):
-        response = self.app.get('/a/bogus/url', expect_errors=True)
-        self.assertEqual(404, response.status_int)
-
-    def test_api_paste_file_not_exist(self):
-        cfg.CONF.set_override('api_paste_config', 'non-existent-file',
-                              group='api')
-        with mock.patch.object(cfg.CONF, 'find_file') as ff:
-            ff.return_value = None
-            self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app)
-
-    @mock.patch('magnum.api.app.deploy')
-    def test_api_paste_file_not_exist_not_abs(self, mock_deploy):
-        path = self.get_path(cfg.CONF['api']['api_paste_config'] + 'test')
-        cfg.CONF.set_override('api_paste_config', path, group='api')
-        self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app)
-
-    def test_noauth(self):
-        # Don't need to auth
-        paste_file = "magnum/tests/unit/api/controllers/noauth-paste.ini"
-        app = self.make_app(paste_file)
-
-        response = app.get('/')
-        self.assertEqual(self.root_expected, response.json)
-
-        response = app.get('/v1/')
-        self.assertEqual(self.v1_expected, response.json)
-
-        response = app.get('/v1/clustertemplates')
-        self.assertEqual(200, response.status_int)
-
-    def test_auth_with_no_public_routes(self):
-        # All apis need auth when access
-        paste_file = "magnum/tests/unit/api/controllers/auth-paste.ini"
-        app = self.make_app(paste_file)
-
-        response = app.get('/', expect_errors=True)
-        self.assertEqual(401, response.status_int)
-
-        response = app.get('/v1/', expect_errors=True)
-        self.assertEqual(401, response.status_int)
-
-    def test_auth_with_root_access(self):
-        # Only / can access without auth
-        paste_file = "magnum/tests/unit/api/controllers/auth-root-access.ini"
-        app = self.make_app(paste_file)
-
-        response = app.get('/')
-        self.assertEqual(self.root_expected, response.json)
-
-        response = app.get('/v1/', expect_errors=True)
-        self.assertEqual(401, response.status_int)
-
-        response = app.get('/v1/clustermodels', expect_errors=True)
-        self.assertEqual(401, response.status_int)
-
-    def test_auth_with_v1_access(self):
-        # Only /v1 can access without auth
-        paste_file = "magnum/tests/unit/api/controllers/auth-v1-access.ini"
-        app = self.make_app(paste_file)
-
-        response = app.get('/', expect_errors=True)
-        self.assertEqual(401, response.status_int)
-
-        response = app.get('/v1/')
-        self.assertEqual(self.v1_expected, response.json)
-
-        response = app.get('/v1/clustertemplates', expect_errors=True)
-        self.assertEqual(401, response.status_int)
-
-
-class TestHealthcheck(api_base.FunctionalTest):
-    def setUp(self):
-        self.addCleanup(self.remove_files)
-        super(TestHealthcheck, self).setUp()
-
-        # Create Temporary file
-        self.tempdir = self.useFixture(fixtures.TempDir()).path
-        paste_ini = "magnum/tests/unit/api/controllers/auth-paste.ini"
-
-        # Read current file and create new one
-        config = ConfigParser.RawConfigParser()
-        config.read(self.get_path(paste_ini))
-        config.set('filter:healthcheck',
-                   'disable_by_file_path',
-                   self.tempdir + "/disable")
-        with open(self.tempdir + "/paste.ini", 'wt') as configfile:
-
config.write(configfile) - - # Set config and create app - cfg.CONF.set_override("api_paste_config", - self.tempdir + "/paste.ini", - group="api") - self.app = webtest.TestApp(app.load_app()) - - def remove_files(self): - shutil.rmtree(self.tempdir, ignore_errors=True) - - def test_healthcheck_enabled(self): - # Check the healthcheck works - response = self.app.get('/healthcheck') - self.assertEqual(200, response.status_int) - self.assertEqual(b"OK", response.body) - - def test_healthcheck_disable_file(self): - # Create the file that disables healthcheck - fo = open(self.tempdir + "/disable", 'a') - fo.close() - - response = self.app.get('/healthcheck', expect_errors=True) - self.assertEqual(503, response.status_int) - self.assertEqual(b"DISABLED BY FILE", response.body) - - -class TestV1Routing(api_base.FunctionalTest): - def test_route_checks_version(self): - self.get_json('/') - self._check_version.assert_called_once_with(mock.ANY, - mock.ANY) - - -class TestCheckVersions(test_base.TestCase): - def setUp(self): - super(TestCheckVersions, self).setUp() - - class ver(object): - major = None - minor = None - - self.version = ver() - - def test_check_version_invalid_major_version(self): - self.version.major = v1_api.BASE_VERSION + 1 - self.version.minor = v1_api.MIN_VER.minor - self.assertRaises(webob_exc.HTTPNotAcceptable, - v1_api.Controller()._check_version, - self.version) - - def test_check_version_too_low(self): - self.version.major = v1_api.BASE_VERSION - self.version.minor = v1_api.MIN_VER.minor - 1 - self.assertRaises(webob_exc.HTTPNotAcceptable, - v1_api.Controller()._check_version, - self.version) - - def test_check_version_too_high(self): - self.version.major = v1_api.BASE_VERSION - self.version.minor = v1_api.MAX_VER.minor + 1 - e = self.assertRaises(webob_exc.HTTPNotAcceptable, - v1_api.Controller()._check_version, - self.version, {'fake-headers': - v1_api.MAX_VER.minor}) - - self.assertEqual(v1_api.MAX_VER.minor, e.headers['fake-headers']) - - def test_check_version_ok(self): - self.version.major = v1_api.BASE_VERSION - self.version.minor = v1_api.MIN_VER.minor - v1_api.Controller()._check_version(self.version) diff --git a/magnum/tests/unit/api/controllers/v1/__init__.py b/magnum/tests/unit/api/controllers/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/api/controllers/v1/test_bay.py b/magnum/tests/unit/api/controllers/v1/test_bay.py deleted file mode 100644 index 40d7fe9d..00000000 --- a/magnum/tests/unit/api/controllers/v1/test_bay.py +++ /dev/null @@ -1,948 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -import mock -from oslo_config import cfg -from oslo_utils import timeutils -from oslo_utils import uuidutils -from wsme import types as wtypes - -from magnum.api import attr_validator -from magnum.api.controllers.v1 import bay as api_bay -from magnum.common import exception -from magnum.conductor import api as rpcapi -from magnum import objects -from magnum.tests import base -from magnum.tests.unit.api import base as api_base -from magnum.tests.unit.api import utils as apiutils -from magnum.tests.unit.objects import utils as obj_utils - - -class TestBayObject(base.TestCase): - - def test_bay_init(self): - bay_dict = apiutils.bay_post_data(baymodel_id=None) - del bay_dict['node_count'] - del bay_dict['master_count'] - del bay_dict['bay_create_timeout'] - bay = api_bay.Bay(**bay_dict) - self.assertEqual(1, bay.node_count) - self.assertEqual(1, bay.master_count) - self.assertEqual(60, bay.bay_create_timeout) - - # test unset value for baymodel_id - bay.baymodel_id = wtypes.Unset - self.assertEqual(wtypes.Unset, bay.baymodel_id) - - # test backwards compatibility of bay fields with new objects - bay_dict['bay_create_timeout'] = 15 - bay_dict['bay_faults'] = {'testfault': 'fault'} - bay = api_bay.Bay(**bay_dict) - self.assertEqual(15, bay.bay_create_timeout) - self.assertEqual(15, bay.create_timeout) - self.assertIn('testfault', bay.bay_faults) - self.assertIn('testfault', bay.faults) - - def test_as_dict_faults(self): - bay_dict = apiutils.bay_post_data(baymodel_id=None) - del bay_dict['node_count'] - del bay_dict['master_count'] - del bay_dict['bay_create_timeout'] - bay = api_bay.Bay(**bay_dict) - bay.bay_faults = {'testfault': 'fault'} - dict = bay.as_dict() - self.assertEqual({'testfault': 'fault'}, dict['faults']) - - -class TestListBay(api_base.FunctionalTest): - - _bay_attrs = ("name", "baymodel_id", "node_count", "status", - "master_count", "stack_id", "bay_create_timeout") - - _expand_bay_attrs = ("name", "baymodel_id", "node_count", "status", - "api_address", "discovery_url", "node_addresses", - "master_count", "master_addresses", "stack_id", - "bay_create_timeout", "status_reason") - - def setUp(self): - super(TestListBay, self).setUp() - obj_utils.create_test_cluster_template(self.context) - - def test_empty(self): - response = self.get_json('/bays') - self.assertEqual([], response['bays']) - - def test_one(self): - bay = obj_utils.create_test_cluster(self.context) - response = self.get_json('/bays') - self.assertEqual(bay.uuid, response['bays'][0]["uuid"]) - self._verify_attrs(self._bay_attrs, response['bays'][0]) - - # Verify atts that should not appear from bay's get_all response - none_attrs = set(self._expand_bay_attrs) - set(self._bay_attrs) - self._verify_attrs(none_attrs, response['bays'][0], positive=False) - - def test_get_one(self): - bay = obj_utils.create_test_cluster(self.context) - response = self.get_json('/bays/%s' % bay['uuid']) - self.assertEqual(bay.uuid, response['uuid']) - self._verify_attrs(self._expand_bay_attrs, response) - - @mock.patch('magnum.common.clients.OpenStackClients.heat') - def test_get_one_failed_bay(self, mock_heat): - fake_resources = mock.MagicMock() - fake_resources.resource_name = 'fake_name' - fake_resources.resource_status_reason = 'fake_reason' - - ht = mock.MagicMock() - ht.resources.list.return_value = [fake_resources] - mock_heat.return_value = ht - - bay = obj_utils.create_test_cluster(self.context, - status='CREATE_FAILED') - response = self.get_json('/bays/%s' % bay['uuid']) - self.assertEqual(bay.uuid, 
response['uuid']) - self.assertEqual({'fake_name': 'fake_reason'}, response['bay_faults']) - - @mock.patch('magnum.common.clients.OpenStackClients.heat') - def test_get_one_failed_bay_heatclient_exception(self, mock_heat): - mock_heat.resources.list.side_effect = Exception('fake') - bay = obj_utils.create_test_cluster(self.context, - status='CREATE_FAILED') - response = self.get_json('/bays/%s' % bay['uuid']) - self.assertEqual(bay.uuid, response['uuid']) - self.assertEqual({}, response['bay_faults']) - - def test_get_one_by_name(self): - bay = obj_utils.create_test_cluster(self.context) - response = self.get_json('/bays/%s' % bay['name']) - self.assertEqual(bay.uuid, response['uuid']) - self._verify_attrs(self._expand_bay_attrs, response) - - def test_get_one_by_name_not_found(self): - response = self.get_json( - '/bays/not_found', - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_one_by_name_multiple_bay(self): - obj_utils.create_test_cluster(self.context, name='test_bay', - uuid=uuidutils.generate_uuid()) - obj_utils.create_test_cluster(self.context, name='test_bay', - uuid=uuidutils.generate_uuid()) - response = self.get_json('/bays/test_bay', expect_errors=True) - self.assertEqual(409, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_all_with_pagination_marker(self): - bay_list = [] - for id_ in range(4): - bay = obj_utils.create_test_cluster(self.context, id=id_, - uuid=uuidutils.generate_uuid()) - bay_list.append(bay) - - response = self.get_json('/bays?limit=3&marker=%s' - % bay_list[2].uuid) - self.assertEqual(1, len(response['bays'])) - self.assertEqual(bay_list[-1].uuid, response['bays'][0]['uuid']) - - def test_detail(self): - bay = obj_utils.create_test_cluster(self.context) - response = self.get_json('/bays/detail') - self.assertEqual(bay.uuid, response['bays'][0]["uuid"]) - self._verify_attrs(self._expand_bay_attrs, response['bays'][0]) - - def test_detail_with_pagination_marker(self): - bay_list = [] - for id_ in range(4): - bay = obj_utils.create_test_cluster(self.context, id=id_, - uuid=uuidutils.generate_uuid()) - bay_list.append(bay) - - response = self.get_json('/bays/detail?limit=3&marker=%s' - % bay_list[2].uuid) - self.assertEqual(1, len(response['bays'])) - self.assertEqual(bay_list[-1].uuid, response['bays'][0]['uuid']) - self._verify_attrs(self._expand_bay_attrs, response['bays'][0]) - - def test_detail_against_single(self): - bay = obj_utils.create_test_cluster(self.context) - response = self.get_json('/bays/%s/detail' % bay['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - bm_list = [] - for id_ in range(5): - bay = obj_utils.create_test_cluster(self.context, id=id_, - uuid=uuidutils.generate_uuid()) - bm_list.append(bay.uuid) - response = self.get_json('/bays') - self.assertEqual(len(bm_list), len(response['bays'])) - uuids = [b['uuid'] for b in response['bays']] - self.assertEqual(sorted(bm_list), sorted(uuids)) - - def test_links(self): - uuid = uuidutils.generate_uuid() - obj_utils.create_test_cluster(self.context, id=1, uuid=uuid) - response = self.get_json('/bays/%s' % uuid) - self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 
'bookmark' - self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) - - def test_collection_links(self): - for id_ in range(5): - obj_utils.create_test_cluster(self.context, id=id_, - uuid=uuidutils.generate_uuid()) - response = self.get_json('/bays/?limit=3') - self.assertEqual(3, len(response['bays'])) - - next_marker = response['bays'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_collection_links_default_limit(self): - cfg.CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - obj_utils.create_test_cluster(self.context, id=id_, - uuid=uuidutils.generate_uuid()) - response = self.get_json('/bays') - self.assertEqual(3, len(response['bays'])) - - next_marker = response['bays'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - -class TestPatch(api_base.FunctionalTest): - - def setUp(self): - super(TestPatch, self).setUp() - self.cluster_template = obj_utils.create_test_cluster_template( - self.context) - self.bay = obj_utils.create_test_cluster(self.context, - name='bay_example_A', - node_count=3) - p = mock.patch.object(rpcapi.API, 'cluster_update') - self.mock_bay_update = p.start() - self.mock_bay_update.side_effect = self._simulate_rpc_bay_update - self.addCleanup(p.stop) - - def _simulate_rpc_bay_update(self, bay, rollback=False): - bay.save() - return bay - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok(self, mock_utcnow): - new_node_count = 4 - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.patch_json('/bays/%s' % self.bay.uuid, - [{'path': '/node_count', - 'value': new_node_count, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/bays/%s' % self.bay.uuid) - self.assertEqual(new_node_count, response['node_count']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - # Assert nothing else was changed - self.assertEqual(self.bay.uuid, response['uuid']) - self.assertEqual(self.bay.cluster_template_id, response['baymodel_id']) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok_by_name(self, mock_utcnow): - new_node_count = 4 - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.patch_json('/bays/%s' % self.bay.name, - [{'path': '/node_count', - 'value': new_node_count, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/bays/%s' % self.bay.uuid) - self.assertEqual(new_node_count, response['node_count']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - # Assert nothing else was changed - self.assertEqual(self.bay.uuid, response['uuid']) - self.assertEqual(self.bay.cluster_template_id, response['baymodel_id']) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok_by_name_not_found(self, mock_utcnow): - name = 'not_found' - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.patch_json('/bays/%s' % name, - [{'path': '/name', 'value': name, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(404, response.status_code) - - def 
test_replace_baymodel_id_failed(self): - cluster_template = obj_utils.create_test_cluster_template( - self.context, - uuid=uuidutils.generate_uuid()) - response = self.patch_json('/bays/%s' % self.bay.uuid, - [{'path': '/baymodel_id', - 'value': cluster_template.uuid, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok_by_name_multiple_bay(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - obj_utils.create_test_cluster(self.context, name='test_bay', - uuid=uuidutils.generate_uuid()) - obj_utils.create_test_cluster(self.context, name='test_bay', - uuid=uuidutils.generate_uuid()) - - response = self.patch_json('/bays/test_bay', - [{'path': '/name', 'value': 'test_bay', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(409, response.status_code) - - def test_replace_non_existent_baymodel_id(self): - response = self.patch_json('/bays/%s' % self.bay.uuid, - [{'path': '/baymodel_id', - 'value': uuidutils.generate_uuid(), - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_replace_invalid_node_count(self): - response = self.patch_json('/bays/%s' % self.bay.uuid, - [{'path': '/node_count', 'value': -1, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_replace_non_existent_bay(self): - response = self.patch_json('/bays/%s' % uuidutils.generate_uuid(), - [{'path': '/name', - 'value': 'bay_example_B', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_replace_bay_name_failed(self): - response = self.patch_json('/bays/%s' % self.bay.uuid, - [{'path': '/name', - 'value': 'bay_example_B', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_add_non_existent_property(self): - response = self.patch_json( - '/bays/%s' % self.bay.uuid, - [{'path': '/foo', 'value': 'bar', 'op': 'add'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - @mock.patch.object(rpcapi.API, 'cluster_update_async') - def test_update_bay_async(self, mock_update): - response = self.patch_json( - '/bays/%s' % self.bay.name, - [{'path': '/node_count', 'value': 4, - 'op': 'replace'}], - headers={'OpenStack-API-Version': 'container-infra 1.2'}) - - self.assertEqual(202, response.status_code) - - @mock.patch.object(rpcapi.API, 'cluster_update_async') - def test_update_bay_with_rollback_enabled(self, mock_update): - response = self.patch_json( - '/bays/%s/?rollback=True' % self.bay.name, - [{'path': '/node_count', 'value': 4, - 'op': 'replace'}], - headers={'OpenStack-API-Version': 'container-infra 1.3'}) - - mock_update.assert_called_once_with(mock.ANY, rollback=True) 
-
-        self.assertEqual(202, response.status_code)
-
-    def test_remove_ok(self):
-        response = self.get_json('/bays/%s' % self.bay.uuid)
-        self.assertIsNotNone(response['name'])
-
-        response = self.patch_json('/bays/%s' % self.bay.uuid,
-                                   [{'path': '/node_count', 'op': 'remove'}])
-        self.assertEqual('application/json', response.content_type)
-        self.assertEqual(200, response.status_code)
-
-        response = self.get_json('/bays/%s' % self.bay.uuid)
-        # only allow node_count for bay, and default value is 1
-        self.assertEqual(1, response['node_count'])
-        # Assert nothing else was changed
-        self.assertEqual(self.bay.uuid, response['uuid'])
-        self.assertEqual(self.bay.cluster_template_id, response['baymodel_id'])
-        self.assertEqual(self.bay.name, response['name'])
-        self.assertEqual(self.bay.master_count, response['master_count'])
-
-    def test_remove_mandatory_property_fail(self):
-        mandatory_properties = ('/uuid', '/baymodel_id')
-        for p in mandatory_properties:
-            response = self.patch_json('/bays/%s' % self.bay.uuid,
-                                       [{'path': p, 'op': 'remove'}],
-                                       expect_errors=True)
-            self.assertEqual(400, response.status_int)
-            self.assertEqual('application/json', response.content_type)
-            self.assertTrue(response.json['errors'])
-
-    def test_remove_non_existent_property(self):
-        response = self.patch_json(
-            '/bays/%s' % self.bay.uuid,
-            [{'path': '/non-existent', 'op': 'remove'}],
-            expect_errors=True)
-        self.assertEqual('application/json', response.content_type)
-        self.assertEqual(400, response.status_code)
-        self.assertTrue(response.json['errors'])
-
-
-class TestPost(api_base.FunctionalTest):
-
-    def setUp(self):
-        super(TestPost, self).setUp()
-        self.cluster_template = obj_utils.create_test_cluster_template(
-            self.context)
-        p = mock.patch.object(rpcapi.API, 'cluster_create')
-        self.mock_bay_create = p.start()
-        self.mock_bay_create.side_effect = self._simulate_rpc_bay_create
-        self.addCleanup(p.stop)
-        p = mock.patch.object(attr_validator, 'validate_os_resources')
-        self.mock_valid_os_res = p.start()
-        self.addCleanup(p.stop)
-
-    def _simulate_rpc_bay_create(self, bay, bay_create_timeout):
-        bay.create()
-        return bay
-
-    @mock.patch('oslo_utils.timeutils.utcnow')
-    def test_create_bay(self, mock_utcnow):
-        bdict = apiutils.bay_post_data()
-        test_time = datetime.datetime(2000, 1, 1, 0, 0)
-        mock_utcnow.return_value = test_time
-
-        response = self.post_json('/bays', bdict)
-        self.assertEqual('application/json', response.content_type)
-        self.assertEqual(201, response.status_int)
-        # Check location header
-        self.assertIsNotNone(response.location)
-        self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))
-        self.assertNotIn('updated_at', response.json.keys())
-        return_created_at = timeutils.parse_isotime(
-            response.json['created_at']).replace(tzinfo=None)
-        self.assertEqual(test_time, return_created_at)
-        self.assertEqual(bdict['bay_create_timeout'],
-                         response.json['bay_create_timeout'])
-
-    def test_create_bay_set_project_id_and_user_id(self):
-        bdict = apiutils.bay_post_data()
-
-        def _simulate_rpc_bay_create(bay, bay_create_timeout):
-            self.assertEqual(self.context.project_id, bay.project_id)
-            self.assertEqual(self.context.user_id, bay.user_id)
-            bay.create()
-            return bay
-        self.mock_bay_create.side_effect = _simulate_rpc_bay_create
-
-        self.post_json('/bays', bdict)
-
-    def test_create_bay_doesnt_contain_id(self):
-        with mock.patch.object(self.dbapi, 'create_cluster',
-                               wraps=self.dbapi.create_cluster) as cc_mock:
-            bdict = apiutils.bay_post_data(name='bay_example_A')
-            response =
self.post_json('/bays', bdict) - self.assertEqual(bdict['name'], response.json['name']) - cc_mock.assert_called_once_with(mock.ANY) - # Check that 'id' is not in first arg of positional args - self.assertNotIn('id', cc_mock.call_args[0][0]) - - def test_create_bay_generate_uuid(self): - bdict = apiutils.bay_post_data() - del bdict['uuid'] - - response = self.post_json('/bays', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(bdict['name'], response.json['name']) - self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) - - def test_create_bay_no_baymodel_id(self): - bdict = apiutils.bay_post_data() - del bdict['baymodel_id'] - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - - def test_create_bay_with_non_existent_baymodel_id(self): - bdict = apiutils.bay_post_data(baymodel_id=uuidutils.generate_uuid()) - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_with_baymodel_name(self): - bdict = apiutils.bay_post_data(baymodel_id=self.cluster_template.name) - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - - def test_create_bay_with_node_count_zero(self): - bdict = apiutils.bay_post_data() - bdict['node_count'] = 0 - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_with_node_count_negative(self): - bdict = apiutils.bay_post_data() - bdict['node_count'] = -1 - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_with_no_node_count(self): - bdict = apiutils.bay_post_data() - del bdict['node_count'] - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(1, response.json['node_count']) - - def test_create_bay_with_master_count_zero(self): - bdict = apiutils.bay_post_data() - bdict['master_count'] = 0 - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_with_no_master_count(self): - bdict = apiutils.bay_post_data() - del bdict['master_count'] - response = self.post_json('/bays', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(1, response.json['master_count']) - - def test_create_bay_with_invalid_long_name(self): - bdict = apiutils.bay_post_data(name='x' * 243) - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_with_invalid_integer_name(self): - 
bdict = apiutils.bay_post_data(name='123456') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_with_invalid_integer_str_name(self): - bdict = apiutils.bay_post_data(name='123456test_bay') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_with_hyphen_invalid_at_start_name(self): - bdict = apiutils.bay_post_data(name='-test_bay') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_with_period_invalid_at_start_name(self): - bdict = apiutils.bay_post_data(name='.test_bay') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_with_underscore_invalid_at_start_name(self): - bdict = apiutils.bay_post_data(name='_test_bay') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_with_valid_str_int_name(self): - bdict = apiutils.bay_post_data(name='test_bay123456') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(response.json['name'], bdict['name']) - - def test_create_bay_with_hyphen_valid_name(self): - bdict = apiutils.bay_post_data(name='test-bay') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(response.json['name'], bdict['name']) - - def test_create_bay_with_period_valid_name(self): - bdict = apiutils.bay_post_data(name='test.bay') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(response.json['name'], bdict['name']) - - def test_create_bay_with_period_at_end_valid_name(self): - bdict = apiutils.bay_post_data(name='testbay.') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(response.json['name'], bdict['name']) - - def test_create_bay_with_hyphen_at_end_valid_name(self): - bdict = apiutils.bay_post_data(name='testbay-') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(response.json['name'], bdict['name']) - - def test_create_bay_with_underscore_at_end_valid_name(self): - bdict = apiutils.bay_post_data(name='testbay_') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - 
self.assertEqual(response.json['name'], bdict['name']) - - def test_create_bay_with_mix_special_char_valid_name(self): - bdict = apiutils.bay_post_data(name='test.-_bay') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(response.json['name'], bdict['name']) - - def test_create_bay_with_capital_letter_start_valid_name(self): - bdict = apiutils.bay_post_data(name='Testbay') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(response.json['name'], bdict['name']) - - def test_create_bay_with_invalid_empty_name(self): - bdict = apiutils.bay_post_data(name='') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_without_name(self): - bdict = apiutils.bay_post_data() - del bdict['name'] - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertIsNotNone(response.json['name']) - - def test_create_bay_with_timeout_none(self): - bdict = apiutils.bay_post_data() - bdict['bay_create_timeout'] = None - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - - def test_create_bay_with_no_timeout(self): - def _simulate_rpc_bay_create(bay, bay_create_timeout): - self.assertEqual(60, bay_create_timeout) - bay.create() - return bay - self.mock_bay_create.side_effect = _simulate_rpc_bay_create - bdict = apiutils.bay_post_data() - del bdict['bay_create_timeout'] - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - - def test_create_bay_with_timeout_negative(self): - bdict = apiutils.bay_post_data() - bdict['bay_create_timeout'] = -1 - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_bay_with_timeout_zero(self): - bdict = apiutils.bay_post_data() - bdict['bay_create_timeout'] = 0 - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - - def test_create_bay_with_invalid_flavor(self): - bdict = apiutils.bay_post_data() - self.mock_valid_os_res.side_effect = exception.FlavorNotFound( - 'test-flavor') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(400, response.status_int) - - def test_create_bay_with_invalid_ext_network(self): - bdict = apiutils.bay_post_data() - self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound( - 'test-net') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(400, response.status_int) - 
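Taken together, the name cases earlier in this class (digits-only and leading '-', '.', or '_' rejected with 400; a leading letter followed by any mix of letters, digits, dots, hyphens, and underscores accepted with 201) imply a validation rule of roughly the following shape. This is a minimal standalone sketch inferred from the assertions, not Magnum's actual validator:

import re

# Illustrative only: a bay name must start with a letter, then any mix
# of letters, digits, dots, hyphens, and underscores, so '' and
# '123456' fail while 'Testbay' and 'test.-_bay' pass, matching the
# tests above.
NAME_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9_.-]*$')

def is_valid_bay_name(name):
    return NAME_RE.match(name) is not None

assert not is_valid_bay_name('123456')      # -> 400 in the tests above
assert not is_valid_bay_name('-test_bay')   # -> 400
assert is_valid_bay_name('test_bay123456')  # -> 201
assert is_valid_bay_name('test.-_bay')      # -> 201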
- def test_create_bay_with_invalid_keypair(self): - bdict = apiutils.bay_post_data() - self.mock_valid_os_res.side_effect = exception.KeyPairNotFound( - 'test-key') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(404, response.status_int) - - def test_create_bay_with_nonexist_image(self): - bdict = apiutils.bay_post_data() - self.mock_valid_os_res.side_effect = exception.ImageNotFound( - 'test-img') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(400, response.status_int) - - def test_create_bay_with_multi_images_same_name(self): - bdict = apiutils.bay_post_data() - self.mock_valid_os_res.side_effect = exception.Conflict('test-img') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(409, response.status_int) - - def test_create_bay_with_on_os_distro_image(self): - bdict = apiutils.bay_post_data() - self.mock_valid_os_res.side_effect = exception.OSDistroFieldNotFound( - 'img') - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(400, response.status_int) - - def test_create_bay_with_no_lb_one_node(self): - cluster_template = obj_utils.create_test_cluster_template( - self.context, name='foo', uuid='foo', master_lb_enabled=False) - bdict = apiutils.bay_post_data(baymodel_id=cluster_template.name, - master_count=1) - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - - def test_create_bay_with_no_lb_multi_node(self): - cluster_template = obj_utils.create_test_cluster_template( - self.context, name='foo', uuid='foo', master_lb_enabled=False) - bdict = apiutils.bay_post_data(baymodel_id=cluster_template.name, - master_count=3) - response = self.post_json('/bays', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - - def test_create_bay_with_docker_volume_size(self): - bdict = apiutils.bay_post_data() - bdict['docker_volume_size'] = 3 - response = self.post_json('/bays', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - bay, timeout = self.mock_bay_create.call_args - self.assertEqual(3, bay[0].docker_volume_size) - - def test_create_bay_without_docker_volume_size(self): - bdict = apiutils.bay_post_data() - # Remove the default docker_volume_size from the bay dict. 
- del bdict['docker_volume_size'] - response = self.post_json('/bays', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - bay, timeout = self.mock_bay_create.call_args - # Verify docker_volume_size from BayModel is used - self.assertEqual(20, bay[0].docker_volume_size) - - -class TestDelete(api_base.FunctionalTest): - - def setUp(self): - super(TestDelete, self).setUp() - self.cluster_template = obj_utils.create_test_cluster_template( - self.context) - self.bay = obj_utils.create_test_cluster(self.context) - p = mock.patch.object(rpcapi.API, 'cluster_delete') - self.mock_bay_delete = p.start() - self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete - self.addCleanup(p.stop) - - def _simulate_rpc_bay_delete(self, bay_uuid): - bay = objects.Cluster.get_by_uuid(self.context, bay_uuid) - bay.destroy() - - def test_delete_bay(self): - self.delete('/bays/%s' % self.bay.uuid) - response = self.get_json('/bays/%s' % self.bay.uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_bay_not_found(self): - uuid = uuidutils.generate_uuid() - response = self.delete('/bays/%s' % uuid, expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_bay_with_name_not_found(self): - response = self.delete('/bays/not_found', expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_bay_with_name(self): - response = self.delete('/bays/%s' % self.bay.name, - expect_errors=True) - self.assertEqual(204, response.status_int) - - def test_delete_multiple_bay_by_name(self): - obj_utils.create_test_cluster(self.context, name='test_bay', - uuid=uuidutils.generate_uuid()) - obj_utils.create_test_cluster(self.context, name='test_bay', - uuid=uuidutils.generate_uuid()) - response = self.delete('/bays/test_bay', expect_errors=True) - self.assertEqual(409, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - -class TestBayPolicyEnforcement(api_base.FunctionalTest): - - def setUp(self): - super(TestBayPolicyEnforcement, self).setUp() - obj_utils.create_test_cluster_template(self.context) - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({rule: "project:non_fake"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertIn( - "Policy doesn't allow %s to be performed."
% rule, - response.json['errors'][0]['detail']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "bay:get_all", self.get_json, '/bays', expect_errors=True) - - def test_policy_disallow_get_one(self): - self.bay = obj_utils.create_test_cluster(self.context) - self._common_policy_check( - "bay:get", self.get_json, '/bays/%s' % self.bay.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "bay:detail", self.get_json, - '/bays/%s/detail' % uuidutils.generate_uuid(), - expect_errors=True) - - def test_policy_disallow_update(self): - self.bay = obj_utils.create_test_cluster(self.context, - name='bay_example_A', - node_count=3) - self._common_policy_check( - "bay:update", self.patch_json, '/bays/%s' % self.bay.name, - [{'path': '/name', 'value': "new_name", 'op': 'replace'}], - expect_errors=True) - - def test_policy_disallow_create(self): - bdict = apiutils.bay_post_data(name='bay_example_A') - self._common_policy_check( - "bay:create", self.post_json, '/bays', bdict, expect_errors=True) - - def _simulate_rpc_bay_delete(self, bay_uuid): - bay = objects.Cluster.get_by_uuid(self.context, bay_uuid) - bay.destroy() - - def test_policy_disallow_delete(self): - p = mock.patch.object(rpcapi.API, 'cluster_delete') - self.mock_bay_delete = p.start() - self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete - self.addCleanup(p.stop) - self.bay = obj_utils.create_test_cluster(self.context) - self._common_policy_check( - "bay:delete", self.delete, '/bays/%s' % self.bay.uuid, - expect_errors=True) - - def _owner_check(self, rule, func, *args, **kwargs): - self.policy.set_rules({rule: "user_id:%(user_id)s"}) - response = func(*args, **kwargs) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertIn( - "Policy doesn't allow %s to be performed." % rule, - response.json['errors'][0]['detail']) - - def test_policy_only_owner_get_one(self): - bay = obj_utils.create_test_cluster(self.context, user_id='another') - self._owner_check("bay:get", self.get_json, '/bays/%s' % bay.uuid, - expect_errors=True) - - def test_policy_only_owner_update(self): - bay = obj_utils.create_test_cluster(self.context, user_id='another') - self._owner_check( - "bay:update", self.patch_json, '/bays/%s' % bay.uuid, - [{'path': '/name', 'value': "new_name", 'op': 'replace'}], - expect_errors=True) - - def test_policy_only_owner_delete(self): - bay = obj_utils.create_test_cluster(self.context, user_id='another') - self._owner_check("bay:delete", self.delete, '/bays/%s' % bay.uuid, - expect_errors=True) diff --git a/magnum/tests/unit/api/controllers/v1/test_baymodel.py b/magnum/tests/unit/api/controllers/v1/test_baymodel.py deleted file mode 100644 index 20324d02..00000000 --- a/magnum/tests/unit/api/controllers/v1/test_baymodel.py +++ /dev/null @@ -1,1037 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
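The 403 assertions in TestBayPolicyEnforcement above come from oslo.policy rule evaluation: a rule string such as "project:non_fake" compares the request's project credential against the literal 'non_fake', which an ordinary test request can never satisfy. A minimal standalone sketch of that mechanism (assumed wiring for illustration; Magnum configures its enforcer elsewhere):

from oslo_config import cfg
from oslo_policy import policy

# Build an enforcer and install the same kind of unsatisfiable rule
# the tests use.
enforcer = policy.Enforcer(cfg.CONF)
enforcer.set_rules(policy.Rules.from_dict(
    {'bay:create': 'project:non_fake'}))

# The test request context carries a different project, so enforcement
# fails, and the API layer translates that failure into HTTP 403.
creds = {'project': 'fake_project', 'user': 'fake_user'}
print(enforcer.enforce('bay:create', {}, creds))  # False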
- -import datetime - -import mock -from oslo_utils import timeutils -from oslo_utils import uuidutils -from six.moves.urllib import parse as urlparse -from webtest.app import AppError -from wsme import types as wtypes - -from magnum.api import attr_validator -from magnum.api.controllers.v1 import baymodel as api_baymodel -from magnum.common import exception -from magnum.common import policy as magnum_policy -import magnum.conf -from magnum.tests import base -from magnum.tests.unit.api import base as api_base -from magnum.tests.unit.api import utils as apiutils -from magnum.tests.unit.objects import utils as obj_utils - -CONF = magnum.conf.CONF - - -class TestBayModelObject(base.TestCase): - - def test_baymodel_init(self): - baymodel_dict = apiutils.baymodel_post_data() - del baymodel_dict['image_id'] - baymodel = api_baymodel.BayModel(**baymodel_dict) - self.assertEqual(wtypes.Unset, baymodel.image_id) - - -class TestListBayModel(api_base.FunctionalTest): - - _baymodel_attrs = ('name', 'apiserver_port', 'network_driver', - 'coe', 'flavor_id', 'fixed_network', - 'dns_nameserver', 'http_proxy', - 'docker_volume_size', 'server_type', - 'cluster_distro', 'external_network_id', - 'image_id', 'registry_enabled', 'no_proxy', - 'keypair_id', 'https_proxy', 'tls_disabled', - 'public', 'labels', 'master_flavor_id', - 'volume_driver', 'insecure_registry') - - def test_empty(self): - response = self.get_json('/baymodels') - self.assertEqual([], response['baymodels']) - - def test_one(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - response = self.get_json('/baymodels') - self.assertEqual(baymodel.uuid, response['baymodels'][0]["uuid"]) - self._verify_attrs(self._baymodel_attrs, - response['baymodels'][0]) - - def test_get_one(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - response = self.get_json('/baymodels/%s' % baymodel['uuid']) - self.assertEqual(baymodel.uuid, response['uuid']) - self._verify_attrs(self._baymodel_attrs, response) - - def test_get_one_by_name(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - response = self.get_json('/baymodels/%s' % baymodel['name']) - self.assertEqual(baymodel.uuid, response['uuid']) - self._verify_attrs(self._baymodel_attrs, response) - - def test_get_one_by_name_not_found(self): - response = self.get_json( - '/baymodels/not_found', - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_one_by_name_multiple_baymodel(self): - obj_utils.create_test_cluster_template( - self.context, name='test_baymodel', - uuid=uuidutils.generate_uuid()) - obj_utils.create_test_cluster_template( - self.context, name='test_baymodel', - uuid=uuidutils.generate_uuid()) - response = self.get_json( - '/baymodels/test_baymodel', - expect_errors=True) - self.assertEqual(409, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_all_with_pagination_marker(self): - bm_list = [] - for id_ in range(4): - baymodel = obj_utils.create_test_cluster_template( - self.context, id=id_, - uuid=uuidutils.generate_uuid()) - bm_list.append(baymodel) - - response = self.get_json('/baymodels?limit=3&marker=%s' - % bm_list[2].uuid) - self.assertEqual(1, len(response['baymodels'])) - self.assertEqual(bm_list[-1].uuid, response['baymodels'][0]['uuid']) - - def test_detail(self): - baymodel = 
obj_utils.create_test_cluster_template(self.context) - response = self.get_json('/baymodels/detail') - self.assertEqual(baymodel.uuid, response['baymodels'][0]["uuid"]) - self._verify_attrs(self._baymodel_attrs, - response['baymodels'][0]) - - def test_detail_with_pagination_marker(self): - bm_list = [] - for id_ in range(4): - baymodel = obj_utils.create_test_cluster_template( - self.context, id=id_, - uuid=uuidutils.generate_uuid()) - bm_list.append(baymodel) - - response = self.get_json('/baymodels/detail?limit=3&marker=%s' - % bm_list[2].uuid) - self.assertEqual(1, len(response['baymodels'])) - self.assertEqual(bm_list[-1].uuid, response['baymodels'][0]['uuid']) - self._verify_attrs(self._baymodel_attrs, - response['baymodels'][0]) - - def test_detail_against_single(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - response = self.get_json('/baymodels/%s/detail' % baymodel['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - bm_list = [] - for id_ in range(5): - baymodel = obj_utils.create_test_cluster_template( - self.context, id=id_, - uuid=uuidutils.generate_uuid()) - bm_list.append(baymodel.uuid) - response = self.get_json('/baymodels') - self.assertEqual(len(bm_list), len(response['baymodels'])) - uuids = [bm['uuid'] for bm in response['baymodels']] - self.assertEqual(sorted(bm_list), sorted(uuids)) - - def test_links(self): - uuid = uuidutils.generate_uuid() - obj_utils.create_test_cluster_template(self.context, id=1, uuid=uuid) - response = self.get_json('/baymodels/%s' % uuid) - self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) - - def test_collection_links(self): - for id_ in range(5): - obj_utils.create_test_cluster_template( - self.context, id=id_, uuid=uuidutils.generate_uuid()) - response = self.get_json('/baymodels/?limit=3') - self.assertEqual(3, len(response['baymodels'])) - - next_marker = response['baymodels'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_collection_links_default_limit(self): - CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - obj_utils.create_test_cluster_template( - self.context, id=id_, uuid=uuidutils.generate_uuid()) - response = self.get_json('/baymodels') - self.assertEqual(3, len(response['baymodels'])) - - next_marker = response['baymodels'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - -class TestPatch(api_base.FunctionalTest): - - def setUp(self): - super(TestPatch, self).setUp() - p = mock.patch.object(attr_validator, 'validate_os_resources') - self.mock_valid_os_res = p.start() - self.addCleanup(p.stop) - self.baymodel = obj_utils.create_test_cluster_template( - self.context, - name='bay_model_example_A', - image_id='nerdherd', - apiserver_port=8080, - fixed_network='private', - flavor_id='m1.magnum', - master_flavor_id='m1.magnum', - external_network_id='public', - keypair_id='test', - volume_driver='rexray', - public=False, - docker_volume_size=20, - coe='swarm', - labels={'key1': 'val1', 'key2': 'val2'} - ) - - def test_update_not_found(self): - uuid = uuidutils.generate_uuid() - response = self.patch_json('/baymodels/%s' % uuid, - [{'path': '/name', - 'value': 'bay_model_example_B', - 'op': 'add'}], - expect_errors=True) - self.assertEqual(404, response.status_int) - 
self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_update_baymodel_with_bay(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - obj_utils.create_test_cluster(self.context, - cluster_template_id=baymodel.uuid) - - response = self.patch_json('/baymodels/%s' % baymodel.uuid, - [{'path': '/name', - 'value': 'bay_model_example_B', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - self.assertIn(baymodel.uuid, response.json['errors'][0]['detail']) - - @mock.patch.object(magnum_policy, 'enforce') - def test_update_public_baymodel_success(self, mock_policy): - mock_policy.return_value = True - response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, - [{'path': '/public', 'value': True, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/baymodels/%s' % self.baymodel.uuid) - self.assertTrue(response['public']) - - @mock.patch.object(magnum_policy, 'enforce') - def test_update_public_baymodel_fail(self, mock_policy): - mock_policy.return_value = False - self.assertRaises(AppError, self.patch_json, - '/baymodels/%s' % self.baymodel.uuid, - [{'path': '/public', 'value': True, - 'op': 'replace'}]) - - def test_update_baymodel_with_bay_allow_update(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - obj_utils.create_test_cluster(self.context, - cluster_template_id=baymodel.uuid) - response = self.patch_json('/baymodels/%s' % baymodel.uuid, - [{'path': '/public', - 'value': True, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(200, response.status_int) - response = self.get_json('/baymodels/%s' % self.baymodel.uuid) - self.assertEqual(response['public'], True) - - def test_update_baymodel_with_bay_not_allow_update(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - obj_utils.create_test_cluster(self.context, - cluster_template_id=baymodel.uuid) - response = self.patch_json('/baymodels/%s' % baymodel.uuid, - [{'path': '/name', - 'value': 'new_name', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(400, response.status_code) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_singular(self, mock_utcnow): - name = 'bay_model_example_B' - test_time = datetime.datetime(2000, 1, 1, 0, 0) - - mock_utcnow.return_value = test_time - response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, - [{'path': '/name', 'value': name, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/baymodels/%s' % self.baymodel.uuid) - self.assertEqual(name, response['name']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - # Assert nothing else was changed - self.assertEqual(self.baymodel.uuid, response['uuid']) - self.assertEqual(self.baymodel.image_id, response['image_id']) - self.assertEqual(self.baymodel.apiserver_port, - response['apiserver_port']) - self.assertEqual(self.baymodel.fixed_network, - response['fixed_network']) - self.assertEqual(self.baymodel.network_driver, - response['network_driver']) - self.assertEqual(self.baymodel.volume_driver, - response['volume_driver']) - 
self.assertEqual(self.baymodel.docker_volume_size, - response['docker_volume_size']) - self.assertEqual(self.baymodel.coe, - response['coe']) - self.assertEqual(self.baymodel.http_proxy, - response['http_proxy']) - self.assertEqual(self.baymodel.https_proxy, - response['https_proxy']) - self.assertEqual(self.baymodel.no_proxy, - response['no_proxy']) - self.assertEqual(self.baymodel.labels, - response['labels']) - - def test_replace_baymodel_with_no_exist_flavor_id(self): - self.mock_valid_os_res.side_effect = exception.FlavorNotFound("aaa") - response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, - [{'path': '/flavor_id', 'value': 'aaa', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_replace_baymodel_with_no_exist_keypair_id(self): - self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("aaa") - response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, - [{'path': '/keypair_id', 'value': 'aaa', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(404, response.status_code) - self.assertTrue(response.json['errors']) - - def test_replace_baymodel_with_no_exist_external_network_id(self): - self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound( - "aaa") - response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, - [{'path': '/external_network_id', - 'value': 'aaa', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_replace_baymodel_with_no_exist_image_id(self): - self.mock_valid_os_res.side_effect = exception.ImageNotFound("aaa") - response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, - [{'path': '/image_id', 'value': 'aaa', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_create_baymodel_with_no_os_distro_image(self): - image_exce = exception.OSDistroFieldNotFound('img') - self.mock_valid_os_res.side_effect = image_exce - response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, - [{'path': '/image_id', 'value': 'img', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_remove_singular(self): - response = self.get_json('/baymodels/%s' % self.baymodel.uuid) - self.assertIsNotNone(response['dns_nameserver']) - - response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, - [{'path': '/dns_nameserver', - 'op': 'remove'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/baymodels/%s' % self.baymodel.uuid) - self.assertIsNone(response['dns_nameserver']) - # Assert nothing else was changed - self.assertEqual(self.baymodel.uuid, response['uuid']) - self.assertEqual(self.baymodel.name, response['name']) - self.assertEqual(self.baymodel.apiserver_port, - response['apiserver_port']) - self.assertEqual(self.baymodel.image_id, - response['image_id']) - self.assertEqual(self.baymodel.fixed_network, - response['fixed_network']) - 
self.assertEqual(self.baymodel.network_driver, - response['network_driver']) - self.assertEqual(self.baymodel.volume_driver, - response['volume_driver']) - self.assertEqual(self.baymodel.docker_volume_size, - response['docker_volume_size']) - self.assertEqual(self.baymodel.coe, response['coe']) - self.assertEqual(self.baymodel.http_proxy, response['http_proxy']) - self.assertEqual(self.baymodel.https_proxy, response['https_proxy']) - self.assertEqual(self.baymodel.no_proxy, response['no_proxy']) - self.assertEqual(self.baymodel.labels, response['labels']) - - def test_remove_non_existent_property_fail(self): - response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, - [{'path': '/non-existent', 'op': 'remove'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_remove_mandatory_property_fail(self): - mandatory_properties = ('/image_id', '/keypair_id', '/coe', - '/external_network_id', '/server_type', - '/tls_disabled', '/public', - '/registry_enabled', - '/cluster_distro', '/network_driver') - for p in mandatory_properties: - response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, - [{'path': p, 'op': 'remove'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_add_root_non_existent(self): - response = self.patch_json( - '/baymodels/%s' % self.baymodel.uuid, - [{'path': '/foo', 'value': 'bar', 'op': 'add'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_remove_uuid(self): - response = self.patch_json('/baymodels/%s' % self.baymodel.uuid, - [{'path': '/uuid', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - -class TestPost(api_base.FunctionalTest): - - def setUp(self): - super(TestPost, self).setUp() - p = mock.patch.object(attr_validator, 'validate_os_resources') - self.mock_valid_os_res = p.start() - self.addCleanup(p.stop) - - @mock.patch('magnum.api.attr_validator.validate_image') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_create_baymodel(self, mock_utcnow, - mock_image_data): - bdict = apiutils.baymodel_post_data() - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - - response = self.post_json('/baymodels', bdict) - self.assertEqual(201, response.status_int) - # Check location header - self.assertIsNotNone(response.location) - expected_location = '/v1/baymodels/%s' % bdict['uuid'] - self.assertEqual(expected_location, - urlparse.urlparse(response.location).path) - self.assertEqual(bdict['uuid'], response.json['uuid']) - self.assertNotIn('updated_at', response.json.keys()) - return_created_at = timeutils.parse_isotime( - response.json['created_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_created_at) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_set_project_id_and_user_id(self, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: -
mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data() - self.post_json('/baymodels', bdict) - cc_mock.assert_called_once_with(mock.ANY) - self.assertEqual(self.context.project_id, - cc_mock.call_args[0][0]['project_id']) - self.assertEqual(self.context.user_id, - cc_mock.call_args[0][0]['user_id']) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_doesnt_contain_id(self, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data(image_id='my-image') - response = self.post_json('/baymodels', bdict) - self.assertEqual(bdict['image_id'], response.json['image_id']) - cc_mock.assert_called_once_with(mock.ANY) - # Check that 'id' is not in first arg of positional args - self.assertNotIn('id', cc_mock.call_args[0][0]) - - def _create_baymodel_raises_app_error(self, **kwargs): - # Create mock for db and image data - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock,\ - mock.patch('magnum.api.attr_validator.validate_image')\ - as mock_image_data: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data(**kwargs) - self.assertRaises(AppError, self.post_json, '/baymodels', bdict) - self.assertFalse(cc_mock.called) - - def test_create_baymodel_with_invalid_long_string(self): - fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id", - "dns_nameserver", "keypair_id", "external_network_id", - "cluster_distro", "fixed_network", "apiserver_port", - "docker_volume_size", "http_proxy", "https_proxy", - "no_proxy", "network_driver", "labels", "volume_driver"] - for field in fields: - self._create_baymodel_raises_app_error(**{field: 'i' * 256}) - - def test_create_baymodel_with_invalid_empty_string(self): - fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id", - "dns_nameserver", "keypair_id", "external_network_id", - "cluster_distro", "fixed_network", "apiserver_port", - "docker_volume_size", "labels", "http_proxy", "https_proxy", - "no_proxy", "network_driver", "volume_driver", "coe"] - for field in fields: - self._create_baymodel_raises_app_error(**{field: ''}) - - def test_create_baymodel_with_invalid_coe(self): - self._create_baymodel_raises_app_error(coe='k8s') - self._create_baymodel_raises_app_error(coe='storm') - self._create_baymodel_raises_app_error(coe='meson') - self._create_baymodel_raises_app_error(coe='osomatsu') - - def test_create_baymodel_with_invalid_docker_volume_size(self): - self._create_baymodel_raises_app_error(docker_volume_size=-1) - self._create_baymodel_raises_app_error( - docker_volume_size=1, - docker_storage_driver="devicemapper") - self._create_baymodel_raises_app_error( - docker_volume_size=2, - docker_storage_driver="devicemapper") - self._create_baymodel_raises_app_error(docker_volume_size='notanint') - - def test_create_baymodel_with_invalid_dns_nameserver(self): - self._create_baymodel_raises_app_error(dns_nameserver='1.1.2') - self._create_baymodel_raises_app_error(dns_nameserver='1.1..1') - self._create_baymodel_raises_app_error(dns_nameserver='openstack.org') - - def test_create_baymodel_with_invalid_apiserver_port(self): - self._create_baymodel_raises_app_error(apiserver_port=-12) - 
self._create_baymodel_raises_app_error(apiserver_port=65536) - self._create_baymodel_raises_app_error(apiserver_port=0) - self._create_baymodel_raises_app_error(apiserver_port=1023) - self._create_baymodel_raises_app_error(apiserver_port='not an int') - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_labels(self, mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data(labels={'key1': 'val1', - 'key2': 'val2'}) - response = self.post_json('/baymodels', bdict) - self.assertEqual(bdict['labels'], - response.json['labels']) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_docker_volume_size(self, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data(docker_volume_size=99) - response = self.post_json('/baymodels', bdict) - self.assertEqual(bdict['docker_volume_size'], - response.json['docker_volume_size']) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_overlay(self, mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data( - docker_volume_size=1, docker_storage_driver="overlay") - response = self.post_json('/baymodels', bdict) - self.assertEqual(bdict['docker_volume_size'], - response.json['docker_volume_size']) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_generate_uuid(self, - mock_image_data): - # TODO(hongbin): Is this test correct? 
- pass - - @mock.patch('magnum.api.attr_validator.validate_image') - def _test_create_baymodel_network_driver_attr(self, - baymodel_dict, - baymodel_config_dict, - expect_errors, - mock_image_data): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - for k, v in baymodel_config_dict.items(): - CONF.set_override(k, v, 'cluster_template') - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - bdict = apiutils.baymodel_post_data(**baymodel_dict) - response = self.post_json('/baymodels', bdict, - expect_errors=expect_errors) - if expect_errors: - self.assertEqual(400, response.status_int) - else: - expected_driver = bdict.get('network_driver') - if not expected_driver: - expected_driver = ( - CONF.cluster_template.swarm_default_network_driver) - self.assertEqual(expected_driver, - response.json['network_driver']) - self.assertEqual(bdict['image_id'], - response.json['image_id']) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) - - def test_create_baymodel_with_network_driver(self): - baymodel_dict = {'coe': 'kubernetes', 'network_driver': 'flannel'} - config_dict = {} # Default config - expect_errors_flag = False - self._test_create_baymodel_network_driver_attr(baymodel_dict, - config_dict, - expect_errors_flag) - - def test_create_baymodel_with_no_network_driver(self): - baymodel_dict = {} - config_dict = {} - expect_errors_flag = False - self._test_create_baymodel_network_driver_attr(baymodel_dict, - config_dict, - expect_errors_flag) - - def test_create_baymodel_with_network_driver_non_def_config(self): - baymodel_dict = {'coe': 'kubernetes', 'network_driver': 'flannel'} - config_dict = { - 'kubernetes_allowed_network_drivers': ['flannel', 'foo']} - expect_errors_flag = False - self._test_create_baymodel_network_driver_attr(baymodel_dict, - config_dict, - expect_errors_flag) - - def test_create_baymodel_with_invalid_network_driver(self): - baymodel_dict = {'coe': 'kubernetes', 'network_driver': 'bad_driver'} - config_dict = { - 'kubernetes_allowed_network_drivers': ['flannel', 'good_driver']} - expect_errors_flag = True - self._test_create_baymodel_network_driver_attr(baymodel_dict, - config_dict, - expect_errors_flag) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_volume_driver(self, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data(volume_driver='rexray') - response = self.post_json('/baymodels', bdict) - self.assertEqual(bdict['volume_driver'], - response.json['volume_driver']) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_no_volume_driver(self, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data() - response = self.post_json('/baymodels', bdict) - self.assertEqual(bdict['volume_driver'], - response.json['volume_driver']) - cc_mock.assert_called_once_with(mock.ANY) - 
self.assertNotIn('id', cc_mock.call_args[0][0]) - - @mock.patch('magnum.api.attr_validator.validate_image') - @mock.patch.object(magnum_policy, 'enforce') - def test_create_baymodel_public_success(self, mock_policy, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_policy.return_value = True - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data(public=True) - response = self.post_json('/baymodels', bdict) - self.assertTrue(response.json['public']) - mock_policy.assert_called_with(mock.ANY, "baymodel:publish", - None, do_raise=False) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - self.assertTrue(cc_mock.call_args[0][0]['public']) - - @mock.patch('magnum.api.attr_validator.validate_image') - @mock.patch.object(magnum_policy, 'enforce') - def test_create_baymodel_public_fail(self, mock_policy, - mock_image_data): - with mock.patch.object(self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template): - # make policy enforcement fail - mock_policy.return_value = False - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data(public=True) - self.assertRaises(AppError, self.post_json, '/baymodels', bdict) - - @mock.patch('magnum.api.attr_validator.validate_image') - @mock.patch.object(magnum_policy, 'enforce') - def test_create_baymodel_public_not_set(self, mock_policy, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data(public=False) - response = self.post_json('/baymodels', bdict) - self.assertFalse(response.json['public']) - # policy enforcement is called only once for enforce_wsgi - self.assertEqual(1, mock_policy.call_count) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - self.assertFalse(cc_mock.call_args[0][0]['public']) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_no_os_distro_image(self, - mock_image_data): - mock_image_data.side_effect = exception.OSDistroFieldNotFound('img') - bdict = apiutils.baymodel_post_data() - del bdict['uuid'] - response = self.post_json('/baymodels', bdict, expect_errors=True) - self.assertEqual(400, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_os_distro_image(self, - mock_image_data): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data() - del bdict['uuid'] - response = self.post_json('/baymodels', bdict, expect_errors=True) - self.assertEqual(201, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_image_name(self, - mock_image_data): - mock_image = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - mock_image_data.return_value = mock_image - bdict = apiutils.baymodel_post_data() - del bdict['uuid'] - response = self.post_json('/baymodels', bdict, expect_errors=True) - self.assertEqual(201, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_no_exist_image_name(self, - mock_image_data): - 
mock_image_data.side_effect = exception.ResourceNotFound('test-img') - bdict = apiutils.baymodel_post_data() - del bdict['uuid'] - response = self.post_json('/baymodels', bdict, expect_errors=True) - self.assertEqual(404, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_multi_image_name(self, - mock_image_data): - mock_image_data.side_effect = exception.Conflict('Multiple images') - bdict = apiutils.baymodel_post_data() - del bdict['uuid'] - response = self.post_json('/baymodels', bdict, expect_errors=True) - self.assertEqual(409, response.status_int) - - def test_create_baymodel_without_image_id(self): - bdict = apiutils.baymodel_post_data() - del bdict['image_id'] - response = self.post_json('/baymodels', bdict, expect_errors=True) - self.assertEqual(400, response.status_int) - - def test_create_baymodel_without_keypair_id(self): - bdict = apiutils.baymodel_post_data() - del bdict['keypair_id'] - response = self.post_json('/baymodels', bdict, expect_errors=True) - self.assertEqual(400, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_dns(self, - mock_image_data): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data() - response = self.post_json('/baymodels', bdict) - self.assertEqual(201, response.status_int) - self.assertEqual(bdict['dns_nameserver'], - response.json['dns_nameserver']) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_no_exist_keypair(self, - mock_image_data): - self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("Test") - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data() - response = self.post_json('/baymodels', bdict, expect_errors=True) - self.assertEqual(404, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_flavor(self, - mock_image_data): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data() - response = self.post_json('/baymodels', bdict) - self.assertEqual(201, response.status_int) - self.assertEqual(bdict['flavor_id'], - response.json['flavor_id']) - self.assertEqual(bdict['master_flavor_id'], - response.json['master_flavor_id']) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_no_exist_flavor(self, - mock_image_data): - self.mock_valid_os_res.side_effect = exception.FlavorNotFound("flavor") - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data() - response = self.post_json('/baymodels', bdict, expect_errors=True) - self.assertEqual(400, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_external_network(self, - mock_image_data): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data() - response = self.post_json('/baymodels', bdict) - self.assertEqual(201, response.status_int) - self.assertEqual(bdict['external_network_id'], - response.json['external_network_id']) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_with_no_exist_external_network(self, - mock_image_data): - self.mock_valid_os_res.side_effect = 
exception.ExternalNetworkNotFound( - "test") - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data() - response = self.post_json('/baymodels', bdict, expect_errors=True) - self.assertEqual(400, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_baymodel_without_name(self, mock_image_data): - with mock.patch.object(self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.baymodel_post_data() - bdict.pop('name') - resp = self.post_json('/baymodels', bdict) - self.assertEqual(201, resp.status_int) - self.assertIsNotNone(resp.json['name']) - - -class TestDelete(api_base.FunctionalTest): - - def test_delete_baymodel(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - self.delete('/baymodels/%s' % baymodel.uuid) - response = self.get_json('/baymodels/%s' % baymodel.uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_baymodel_with_bay(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - obj_utils.create_test_cluster(self.context, - cluster_template_id=baymodel.uuid) - response = self.delete('/baymodels/%s' % baymodel.uuid, - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - self.assertIn(baymodel.uuid, response.json['errors'][0]['detail']) - - def test_delete_baymodel_not_found(self): - uuid = uuidutils.generate_uuid() - response = self.delete('/baymodels/%s' % uuid, expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_baymodel_with_name(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - response = self.delete('/baymodels/%s' % baymodel['name'], - expect_errors=True) - self.assertEqual(204, response.status_int) - - def test_delete_baymodel_with_name_not_found(self): - response = self.delete('/baymodels/not_found', expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_multiple_baymodel_by_name(self): - obj_utils.create_test_cluster_template( - self.context, name='test_baymodel', uuid=uuidutils.generate_uuid()) - obj_utils.create_test_cluster_template( - self.context, name='test_baymodel', uuid=uuidutils.generate_uuid()) - response = self.delete('/baymodels/test_baymodel', expect_errors=True) - self.assertEqual(409, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - -class TestBayModelPolicyEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({rule: "project:non_fake"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertIn( - "Policy doesn't allow %s to be performed."
% rule, - response.json['errors'][0]['detail']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "baymodel:get_all", self.get_json, '/baymodels', - expect_errors=True) - - def test_policy_disallow_get_one(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - self._common_policy_check( - "baymodel:get", self.get_json, - '/baymodels/%s' % baymodel.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "baymodel:detail", self.get_json, - '/baymodels/%s/detail' % uuidutils.generate_uuid(), - expect_errors=True) - - def test_policy_disallow_update(self): - baymodel = obj_utils.create_test_cluster_template( - self.context, - name='example_A', - uuid=uuidutils.generate_uuid()) - self._common_policy_check( - "baymodel:update", self.patch_json, - '/baymodels/%s' % baymodel.name, - [{'path': '/name', 'value': "new_name", 'op': 'replace'}], - expect_errors=True) - - def test_policy_disallow_create(self): - bdict = apiutils.baymodel_post_data(name='bay_model_example_A') - self._common_policy_check( - "baymodel:create", self.post_json, '/baymodels', bdict, - expect_errors=True) - - def test_policy_disallow_delete(self): - baymodel = obj_utils.create_test_cluster_template(self.context) - self._common_policy_check( - "baymodel:delete", self.delete, - '/baymodels/%s' % baymodel.uuid, expect_errors=True) - - def _owner_check(self, rule, func, *args, **kwargs): - self.policy.set_rules({rule: "user_id:%(user_id)s"}) - response = func(*args, **kwargs) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertIn( - "Policy doesn't allow %s to be performed." % rule, - response.json['errors'][0]['detail']) - - def test_policy_only_owner_get_one(self): - baymodel = obj_utils.create_test_cluster_template(self.context, - user_id='another') - self._owner_check("baymodel:get", self.get_json, - '/baymodels/%s' % baymodel.uuid, expect_errors=True) - - def test_policy_only_owner_update(self): - baymodel = obj_utils.create_test_cluster_template(self.context, - user_id='another') - self._owner_check( - "baymodel:update", self.patch_json, - '/baymodels/%s' % baymodel.uuid, - [{'path': '/name', 'value': "new_name", 'op': 'replace'}], - expect_errors=True) - - def test_policy_only_owner_delete(self): - baymodel = obj_utils.create_test_cluster_template(self.context, - user_id='another') - self._owner_check( - "baymodel:delete", self.delete, '/baymodels/%s' % baymodel.uuid, - expect_errors=True) diff --git a/magnum/tests/unit/api/controllers/v1/test_certificate.py b/magnum/tests/unit/api/controllers/v1/test_certificate.py deleted file mode 100644 index cf84e0cb..00000000 --- a/magnum/tests/unit/api/controllers/v1/test_certificate.py +++ /dev/null @@ -1,274 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
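The certificate tests below never reach a real conductor: magnum.conductor.api.API is patched with a MagicMock, and for signing the mock is given a side_effect that fills in the PEM (see TestPost._fake_sign below). A minimal standalone sketch of that stubbing pattern:

import mock

# Stand-in for the conductor RPC API.
conductor_api = mock.MagicMock()

def _fake_sign(cluster, cert):
    # Pretend to sign: just attach a PEM, as the tests below do.
    cert.pem = 'fake-pem'
    return cert

conductor_api.sign_certificate.side_effect = _fake_sign

cert = mock.Mock(csr='fake-csr', pem=None)
signed = conductor_api.sign_certificate('some-cluster', cert)
assert signed.pem == 'fake-pem'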
- -import mock -from oslo_utils import uuidutils - -from magnum.api.controllers.v1 import certificate as api_cert -from magnum.tests import base -from magnum.tests.unit.api import base as api_base -from magnum.tests.unit.api import utils as api_utils -from magnum.tests.unit.objects import utils as obj_utils - - -HEADERS = {'OpenStack-API-Version': 'container-infra latest'} - - -class TestCertObject(base.TestCase): - - @mock.patch('magnum.api.utils.get_resource') - def test_cert_init(self, mock_get_resource): - cert_dict = api_utils.cert_post_data() - mock_cluster = mock.MagicMock() - mock_cluster.uuid = cert_dict['cluster_uuid'] - mock_get_resource.return_value = mock_cluster - - cert = api_cert.Certificate(**cert_dict) - - self.assertEqual(cert_dict['cluster_uuid'], cert.cluster_uuid) - self.assertEqual(cert_dict['csr'], cert.csr) - self.assertEqual(cert_dict['pem'], cert.pem) - - -class TestGetCaCertificate(api_base.FunctionalTest): - - def setUp(self): - super(TestGetCaCertificate, self).setUp() - self.cluster = obj_utils.create_test_cluster(self.context) - - conductor_api_patcher = mock.patch('magnum.conductor.api.API') - self.conductor_api_class = conductor_api_patcher.start() - self.conductor_api = mock.MagicMock() - self.conductor_api_class.return_value = self.conductor_api - self.addCleanup(conductor_api_patcher.stop) - - def test_get_one(self): - fake_cert = api_utils.cert_post_data() - mock_cert = mock.MagicMock() - mock_cert.as_dict.return_value = fake_cert - self.conductor_api.get_ca_certificate.return_value = mock_cert - - response = self.get_json('/certificates/%s' % self.cluster.uuid, - headers=HEADERS) - - self.assertEqual(self.cluster.uuid, response['cluster_uuid']) - # check that bay is still valid as well - self.assertEqual(self.cluster.uuid, response['bay_uuid']) - self.assertEqual(fake_cert['csr'], response['csr']) - self.assertEqual(fake_cert['pem'], response['pem']) - - def test_get_one_by_name(self): - fake_cert = api_utils.cert_post_data() - mock_cert = mock.MagicMock() - mock_cert.as_dict.return_value = fake_cert - self.conductor_api.get_ca_certificate.return_value = mock_cert - - response = self.get_json('/certificates/%s' % self.cluster.name, - headers=HEADERS) - - self.assertEqual(self.cluster.uuid, response['cluster_uuid']) - # check that bay is still valid as well - self.assertEqual(self.cluster.uuid, response['bay_uuid']) - self.assertEqual(fake_cert['csr'], response['csr']) - self.assertEqual(fake_cert['pem'], response['pem']) - - def test_get_one_by_name_not_found(self): - response = self.get_json('/certificates/not_found', - expect_errors=True, headers=HEADERS) - - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_one_by_name_multiple_cluster(self): - obj_utils.create_test_cluster(self.context, name='test_cluster', - uuid=uuidutils.generate_uuid()) - obj_utils.create_test_cluster(self.context, name='test_cluster', - uuid=uuidutils.generate_uuid()) - - response = self.get_json('/certificates/test_cluster', - expect_errors=True, headers=HEADERS) - - self.assertEqual(409, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_links(self): - fake_cert = api_utils.cert_post_data() - mock_cert = mock.MagicMock() - mock_cert.as_dict.return_value = fake_cert - self.conductor_api.get_ca_certificate.return_value = mock_cert - - response = 
self.get_json('/certificates/%s' % self.cluster.uuid, - headers=HEADERS) - - self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(self.cluster.uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) - - -class TestPost(api_base.FunctionalTest): - - def setUp(self): - super(TestPost, self).setUp() - self.cluster = obj_utils.create_test_cluster(self.context) - - conductor_api_patcher = mock.patch('magnum.conductor.api.API') - self.conductor_api_class = conductor_api_patcher.start() - self.conductor_api = mock.MagicMock() - self.conductor_api_class.return_value = self.conductor_api - self.addCleanup(conductor_api_patcher.stop) - - self.conductor_api.sign_certificate.side_effect = self._fake_sign - - @staticmethod - def _fake_sign(cluster, cert): - cert.pem = 'fake-pem' - return cert - - def test_create_cert(self, ): - new_cert = api_utils.cert_post_data(cluster_uuid=self.cluster.uuid) - del new_cert['pem'] - - response = self.post_json('/certificates', new_cert, headers=HEADERS) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(new_cert['cluster_uuid'], - response.json['cluster_uuid']) - # verify bay_uuid is still valid as well - self.assertEqual(new_cert['cluster_uuid'], response.json['bay_uuid']) - self.assertEqual('fake-pem', response.json['pem']) - - # Test that bay_uuid is still backward compatible - def test_create_cert_by_bay_name(self, ): - new_cert = api_utils.cert_post_data(cluster_uuid=self.cluster.uuid) - del new_cert['pem'] - new_cert['bay_uuid'] = new_cert['cluster_uuid'] - del new_cert['cluster_uuid'] - - response = self.post_json('/certificates', new_cert, headers=HEADERS) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(self.cluster.uuid, response.json['cluster_uuid']) - # verify bay_uuid is still valid as well - self.assertEqual(self.cluster.uuid, response.json['bay_uuid']) - self.assertEqual('fake-pem', response.json['pem']) - - def test_create_cert_by_cluster_name(self, ): - new_cert = api_utils.cert_post_data(cluster_uuid=self.cluster.name) - del new_cert['pem'] - - response = self.post_json('/certificates', new_cert, headers=HEADERS) - - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(self.cluster.uuid, response.json['cluster_uuid']) - self.assertEqual('fake-pem', response.json['pem']) - - def test_create_cert_cluster_not_found(self, ): - new_cert = api_utils.cert_post_data(cluster_uuid='not_found') - del new_cert['pem'] - - response = self.post_json('/certificates', new_cert, - expect_errors=True, headers=HEADERS) - - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - -class TestRotateCaCertificate(api_base.FunctionalTest): - - def setUp(self): - super(TestRotateCaCertificate, self).setUp() - self.cluster = obj_utils.create_test_cluster(self.context) - - conductor_api_patcher = mock.patch('magnum.conductor.api.API') - self.conductor_api_class = conductor_api_patcher.start() - self.conductor_api = mock.MagicMock() - self.conductor_api_class.return_value = self.conductor_api - self.addCleanup(conductor_api_patcher.stop) - - def test_rotate_ca_cert(self): - fake_cert = 
api_utils.cert_post_data() - mock_cert = mock.MagicMock() - mock_cert.as_dict.return_value = fake_cert - self.conductor_api.rotate_ca_certificate.return_value = mock_cert - - response = self.patch_json('/certificates/%s' % self.cluster.uuid, - params={}, headers=HEADERS) - - self.assertEqual(202, response.status_code) - - -class TestRotateCaCertificateNonTls(api_base.FunctionalTest): - - def setUp(self): - super(TestRotateCaCertificateNonTls, self).setUp() - self.cluster_template = obj_utils.create_test_cluster_template( - self.context, tls_disabled=True) - self.cluster = obj_utils.create_test_cluster(self.context) - - conductor_api_patcher = mock.patch('magnum.conductor.api.API') - self.conductor_api_class = conductor_api_patcher.start() - self.conductor_api = mock.MagicMock() - self.conductor_api_class.return_value = self.conductor_api - self.addCleanup(conductor_api_patcher.stop) - - def test_rotate_ca_cert_non_tls(self): - fake_cert = api_utils.cert_post_data() - mock_cert = mock.MagicMock() - mock_cert.as_dict.return_value = fake_cert - self.conductor_api.rotate_ca_certificate.return_value = mock_cert - - response = self.patch_json('/certificates/%s' % self.cluster.uuid, - params={}, headers=HEADERS, - expect_errors=True) - self.assertEqual(400, response.status_code) - self.assertIn("Rotating the CA certificate on a non-TLS cluster", - response.json['errors'][0]['detail']) - - -class TestCertPolicyEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({rule: "project_id:non_fake"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." % rule, - response.json['errors'][0]['detail']) - - def test_policy_disallow_get_one(self): - cluster = obj_utils.create_test_cluster(self.context) - self._common_policy_check( - "certificate:get", self.get_json, - '/certificates/%s' % cluster.uuid, - expect_errors=True, headers=HEADERS) - - def test_policy_disallow_create(self): - cluster = obj_utils.create_test_cluster(self.context) - cert = api_utils.cert_post_data(cluster_uuid=cluster.uuid) - self._common_policy_check( - "certificate:create", self.post_json, '/certificates', cert, - expect_errors=True, headers=HEADERS) - - def test_policy_disallow_rotate(self): - cluster = obj_utils.create_test_cluster(self.context) - self._common_policy_check( - "certificate:rotate_ca", self.patch_json, - '/certificates/%s' % cluster.uuid, params={}, expect_errors=True, - headers=HEADERS) diff --git a/magnum/tests/unit/api/controllers/v1/test_cluster.py b/magnum/tests/unit/api/controllers/v1/test_cluster.py deleted file mode 100644 index 6f1bee9c..00000000 --- a/magnum/tests/unit/api/controllers/v1/test_cluster.py +++ /dev/null @@ -1,959 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
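The certificate test classes above (TestGetCaCertificate, TestPost, TestRotateCaCertificate, TestRotateCaCertificateNonTls) repeat the same four-step setUp boilerplate to stub out the conductor: patch magnum.conductor.api.API, start the patcher, swap in a MagicMock, and register the cleanup. A small helper mixin could factor that out; the following is a hypothetical sketch, not part of the original file:

    import mock

    class ConductorAPIStubMixin(object):
        # Hypothetical mixin for the functional test classes above; it
        # stubs the conductor so no RPC is attempted during API tests.

        def stub_conductor_api(self):
            patcher = mock.patch('magnum.conductor.api.API')
            conductor_api_class = patcher.start()
            self.addCleanup(patcher.stop)
            # Every instantiation of the conductor API now returns this
            # mock, so tests can attach return values or side effects.
            self.conductor_api = mock.MagicMock()
            conductor_api_class.return_value = self.conductor_api
            return self.conductor_api

Each setUp would then reduce to self.conductor_api = self.stub_conductor_api() plus any per-class behavior, for example self.conductor_api.sign_certificate.side_effect = self._fake_sign.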
- -import datetime - -import mock -from oslo_config import cfg -from oslo_utils import timeutils -from oslo_utils import uuidutils -from wsme import types as wtypes - -from magnum.api import attr_validator -from magnum.api.controllers.v1 import cluster as api_cluster -from magnum.common import exception -from magnum.conductor import api as rpcapi -import magnum.conf -from magnum import objects -from magnum.tests import base -from magnum.tests.unit.api import base as api_base -from magnum.tests.unit.api import utils as apiutils -from magnum.tests.unit.objects import utils as obj_utils - -CONF = magnum.conf.CONF - - -class TestClusterObject(base.TestCase): - def test_cluster_init(self): - cluster_dict = apiutils.cluster_post_data(cluster_template_id=None) - del cluster_dict['node_count'] - del cluster_dict['master_count'] - del cluster_dict['create_timeout'] - cluster = api_cluster.Cluster(**cluster_dict) - self.assertEqual(1, cluster.node_count) - self.assertEqual(1, cluster.master_count) - self.assertEqual(60, cluster.create_timeout) - - # test unset value for cluster_template_id - cluster.cluster_template_id = wtypes.Unset - self.assertEqual(wtypes.Unset, cluster.cluster_template_id) - - # test backwards compatibility of bay fields with new objects - cluster_dict['create_timeout'] = 15 - cluster = api_cluster.Cluster(**cluster_dict) - self.assertEqual(15, cluster.create_timeout) - - -class TestListCluster(api_base.FunctionalTest): - _cluster_attrs = ("name", "cluster_template_id", "node_count", "status", - "master_count", "stack_id", "create_timeout") - - _expand_cluster_attrs = ("name", "cluster_template_id", "node_count", - "status", "api_address", "discovery_url", - "node_addresses", "master_count", - "master_addresses", "stack_id", - "create_timeout", "status_reason") - - def setUp(self): - super(TestListCluster, self).setUp() - obj_utils.create_test_cluster_template(self.context) - - def test_empty(self): - response = self.get_json('/clusters') - self.assertEqual([], response['clusters']) - - def test_one(self): - cluster = obj_utils.create_test_cluster(self.context) - response = self.get_json('/clusters') - self.assertEqual(cluster.uuid, response['clusters'][0]["uuid"]) - self._verify_attrs(self._cluster_attrs, response['clusters'][0]) - - # Verify attrs do not appear from cluster's get_all response - none_attrs = \ - set(self._expand_cluster_attrs) - set(self._cluster_attrs) - self._verify_attrs(none_attrs, response['clusters'][0], - positive=False) - - def test_get_one(self): - cluster = obj_utils.create_test_cluster(self.context) - response = self.get_json('/clusters/%s' % cluster['uuid']) - self.assertEqual(cluster.uuid, response['uuid']) - self._verify_attrs(self._expand_cluster_attrs, response) - - @mock.patch('magnum.common.clients.OpenStackClients.heat') - def test_get_one_failed_cluster(self, mock_heat): - fake_resources = mock.MagicMock() - fake_resources.resource_name = 'fake_name' - fake_resources.resource_status_reason = 'fake_reason' - - ht = mock.MagicMock() - ht.resources.list.return_value = [fake_resources] - mock_heat.return_value = ht - - cluster = obj_utils.create_test_cluster(self.context, - status='CREATE_FAILED') - response = self.get_json('/clusters/%s' % cluster['uuid']) - self.assertEqual(cluster.uuid, response['uuid']) - self.assertEqual({'fake_name': 'fake_reason'}, response['faults']) - - @mock.patch('magnum.common.clients.OpenStackClients.heat') - def test_get_one_failed_cluster_heatclient_exception(self, mock_heat): - 
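    # NOTE (editorial, not in the original file): the side effect below is
    # attached to the patched heat factory itself, while the controller talks
    # to mock_heat.return_value, so resources.list() likely never raises. The
    # test still passes because a bare MagicMock iterates as empty, which also
    # yields the empty faults dict asserted at the end.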
mock_heat.resources.list.side_effect = Exception('fake') - cluster = obj_utils.create_test_cluster(self.context, - status='CREATE_FAILED') - response = self.get_json('/clusters/%s' % cluster['uuid']) - self.assertEqual(cluster.uuid, response['uuid']) - self.assertEqual({}, response['faults']) - - def test_get_one_by_name(self): - cluster = obj_utils.create_test_cluster(self.context) - response = self.get_json('/clusters/%s' % cluster['name']) - self.assertEqual(cluster.uuid, response['uuid']) - self._verify_attrs(self._expand_cluster_attrs, response) - - def test_get_one_by_name_not_found(self): - response = self.get_json( - '/clusters/not_found', - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_one_by_uuid(self): - temp_uuid = uuidutils.generate_uuid() - obj_utils.create_test_cluster(self.context, uuid=temp_uuid) - response = self.get_json( - '/clusters/%s' % temp_uuid) - self.assertEqual(temp_uuid, response['uuid']) - - def test_get_one_by_uuid_not_found(self): - temp_uuid = uuidutils.generate_uuid() - response = self.get_json( - '/clusters/%s' % temp_uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_one_by_name_multiple_cluster(self): - obj_utils.create_test_cluster(self.context, name='test_cluster', - uuid=uuidutils.generate_uuid()) - obj_utils.create_test_cluster(self.context, name='test_cluster', - uuid=uuidutils.generate_uuid()) - response = self.get_json('/clusters/test_cluster', - expect_errors=True) - self.assertEqual(409, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_all_with_pagination_marker(self): - cluster_list = [] - for id_ in range(4): - temp_uuid = uuidutils.generate_uuid() - cluster = obj_utils.create_test_cluster(self.context, id=id_, - uuid=temp_uuid) - cluster_list.append(cluster) - - response = self.get_json('/clusters?limit=3&marker=%s' - % cluster_list[2].uuid) - self.assertEqual(1, len(response['clusters'])) - self.assertEqual(cluster_list[-1].uuid, - response['clusters'][0]['uuid']) - - def test_detail(self): - cluster = obj_utils.create_test_cluster(self.context) - response = self.get_json('/clusters/detail') - self.assertEqual(cluster.uuid, response['clusters'][0]["uuid"]) - self._verify_attrs(self._expand_cluster_attrs, - response['clusters'][0]) - - def test_detail_with_pagination_marker(self): - cluster_list = [] - for id_ in range(4): - temp_uuid = uuidutils.generate_uuid() - cluster = obj_utils.create_test_cluster(self.context, id=id_, - uuid=temp_uuid) - cluster_list.append(cluster) - - response = self.get_json('/clusters/detail?limit=3&marker=%s' - % cluster_list[2].uuid) - self.assertEqual(1, len(response['clusters'])) - self.assertEqual(cluster_list[-1].uuid, - response['clusters'][0]['uuid']) - self._verify_attrs(self._expand_cluster_attrs, - response['clusters'][0]) - - def test_detail_against_single(self): - cluster = obj_utils.create_test_cluster(self.context) - response = self.get_json('/clusters/%s/detail' % cluster['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - bm_list = [] - for id_ in range(5): - temp_uuid = uuidutils.generate_uuid() - cluster = obj_utils.create_test_cluster(self.context, id=id_, - 
uuid=temp_uuid) - bm_list.append(cluster.uuid) - response = self.get_json('/clusters') - self.assertEqual(len(bm_list), len(response['clusters'])) - uuids = [b['uuid'] for b in response['clusters']] - self.assertEqual(sorted(bm_list), sorted(uuids)) - - def test_links(self): - uuid = uuidutils.generate_uuid() - obj_utils.create_test_cluster(self.context, id=1, uuid=uuid) - response = self.get_json('/clusters/%s' % uuid) - self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], - bookmark=bookmark)) - - def test_collection_links(self): - for id_ in range(5): - obj_utils.create_test_cluster(self.context, id=id_, - uuid=uuidutils.generate_uuid()) - response = self.get_json('/clusters/?limit=3') - self.assertEqual(3, len(response['clusters'])) - - next_marker = response['clusters'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_collection_links_default_limit(self): - cfg.CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - obj_utils.create_test_cluster(self.context, id=id_, - uuid=uuidutils.generate_uuid()) - response = self.get_json('/clusters') - self.assertEqual(3, len(response['clusters'])) - - next_marker = response['clusters'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - -class TestPatch(api_base.FunctionalTest): - def setUp(self): - super(TestPatch, self).setUp() - self.cluster_template_obj = obj_utils.create_test_cluster_template( - self.context) - self.cluster_obj = obj_utils.create_test_cluster( - self.context, name='cluster_example_A', node_count=3) - p = mock.patch.object(rpcapi.API, 'cluster_update_async') - self.mock_cluster_update = p.start() - self.mock_cluster_update.side_effect = self._sim_rpc_cluster_update - self.addCleanup(p.stop) - - def _sim_rpc_cluster_update(self, cluster, rollback=False): - cluster.save() - return cluster - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok(self, mock_utcnow): - new_node_count = 4 - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, - [{'path': '/node_count', - 'value': new_node_count, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_code) - - response = self.get_json('/clusters/%s' % self.cluster_obj.uuid) - self.assertEqual(new_node_count, response['node_count']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - # Assert nothing else was changed - self.assertEqual(self.cluster_obj.uuid, response['uuid']) - self.assertEqual(self.cluster_obj.cluster_template_id, - response['cluster_template_id']) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok_by_name(self, mock_utcnow): - new_node_count = 4 - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.patch_json('/clusters/%s' % self.cluster_obj.name, - [{'path': '/node_count', - 'value': new_node_count, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_code) - - response = self.get_json('/clusters/%s' % self.cluster_obj.uuid) - self.assertEqual(new_node_count, response['node_count']) - return_updated_at = 
timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - # Assert nothing else was changed - self.assertEqual(self.cluster_obj.uuid, response['uuid']) - self.assertEqual(self.cluster_obj.cluster_template_id, - response['cluster_template_id']) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok_by_name_not_found(self, mock_utcnow): - name = 'not_found' - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.patch_json('/clusters/%s' % name, - [{'path': '/name', 'value': name, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(404, response.status_code) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok_by_uuid_not_found(self, mock_utcnow): - uuid = uuidutils.generate_uuid() - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.patch_json('/clusters/%s' % uuid, - [{'path': '/cluster_id', 'value': uuid, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(404, response.status_code) - - def test_replace_cluster_template_id_failed(self): - cluster_template = obj_utils.create_test_cluster_template( - self.context, - uuid=uuidutils.generate_uuid()) - response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, - [{'path': '/cluster_template_id', - 'value': cluster_template.uuid, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok_by_name_multiple_cluster(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - obj_utils.create_test_cluster(self.context, name='test_cluster', - uuid=uuidutils.generate_uuid()) - obj_utils.create_test_cluster(self.context, name='test_cluster', - uuid=uuidutils.generate_uuid()) - - response = self.patch_json('/clusters/test_cluster', - [{'path': '/name', - 'value': 'test_cluster', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(409, response.status_code) - - def test_replace_non_existent_cluster_template_id(self): - response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, - [{'path': '/cluster_template_id', - 'value': uuidutils.generate_uuid(), - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_replace_invalid_node_count(self): - response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, - [{'path': '/node_count', 'value': -1, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_replace_non_existent_cluster(self): - response = self.patch_json('/clusters/%s' % - uuidutils.generate_uuid(), - [{'path': '/name', - 'value': 'cluster_example_B', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def 
test_replace_cluster_name_failed(self): - response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, - [{'path': '/name', - 'value': 'cluster_example_B', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_add_non_existent_property(self): - response = self.patch_json( - '/clusters/%s' % self.cluster_obj.uuid, - [{'path': '/foo', 'value': 'bar', 'op': 'add'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_update_cluster_with_rollback_enabled(self): - response = self.patch_json( - '/clusters/%s/?rollback=True' % self.cluster_obj.uuid, - [{'path': '/node_count', 'value': 4, - 'op': 'replace'}], - headers={'OpenStack-API-Version': 'container-infra 1.3'}) - - self.mock_cluster_update.assert_called_once_with(mock.ANY, True) - self.assertEqual(202, response.status_code) - - def test_update_cluster_with_rollback_disabled(self): - response = self.patch_json( - '/clusters/%s/?rollback=False' % self.cluster_obj.uuid, - [{'path': '/node_count', 'value': 4, - 'op': 'replace'}], - headers={'OpenStack-API-Version': 'container-infra 1.3'}) - - self.mock_cluster_update.assert_called_once_with(mock.ANY, False) - self.assertEqual(202, response.status_code) - - def test_remove_ok(self): - response = self.get_json('/clusters/%s' % self.cluster_obj.uuid) - self.assertIsNotNone(response['name']) - - response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, - [{'path': '/node_count', - 'op': 'remove'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_code) - - response = self.get_json('/clusters/%s' % self.cluster_obj.uuid) - # only allow node_count for cluster, and default value is 1 - self.assertEqual(1, response['node_count']) - # Assert nothing else was changed - self.assertEqual(self.cluster_obj.uuid, response['uuid']) - self.assertEqual(self.cluster_obj.cluster_template_id, - response['cluster_template_id']) - self.assertEqual(self.cluster_obj.name, response['name']) - self.assertEqual(self.cluster_obj.master_count, - response['master_count']) - - def test_remove_mandatory_property_fail(self): - mandatory_properties = ('/uuid', '/cluster_template_id') - for p in mandatory_properties: - response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid, - [{'path': p, 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_remove_non_existent_property(self): - response = self.patch_json( - '/clusters/%s' % self.cluster_obj.uuid, - [{'path': '/non-existent', 'op': 'remove'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - -class TestPost(api_base.FunctionalTest): - def setUp(self): - super(TestPost, self).setUp() - self.cluster_template = obj_utils.create_test_cluster_template( - self.context) - p = mock.patch.object(rpcapi.API, 'cluster_create_async') - self.mock_cluster_create = p.start() - self.mock_cluster_create.side_effect = self._simulate_cluster_create - self.addCleanup(p.stop) - p = mock.patch.object(attr_validator, 'validate_os_resources') - self.mock_valid_os_res = 
p.start() - self.addCleanup(p.stop) - - def _simulate_cluster_create(self, cluster, create_timeout): - cluster.create() - return cluster - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_create_cluster(self, mock_utcnow): - bdict = apiutils.cluster_post_data() - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.post_json('/clusters', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_create_cluster_resource_limit_reached(self, mock_utcnow): - # override max_cluster_per_project to 1 - CONF.set_override('max_clusters_per_project', 1, group='quotas') - - bdict = apiutils.cluster_post_data() - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - # create first cluster - response = self.post_json('/clusters', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) - - # now try to create second cluster and make sure it fails - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(403, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_cluster_set_project_id_and_user_id(self): - bdict = apiutils.cluster_post_data() - - def _simulate_rpc_cluster_create(cluster, create_timeout): - self.assertEqual(self.context.project_id, cluster.project_id) - self.assertEqual(self.context.user_id, cluster.user_id) - cluster.create() - return cluster - - self.mock_cluster_create.side_effect = _simulate_rpc_cluster_create - - self.post_json('/clusters', bdict) - - def test_create_cluster_doesnt_contain_id(self): - with mock.patch.object(self.dbapi, 'create_cluster', - wraps=self.dbapi.create_cluster) as cc_mock: - bdict = apiutils.cluster_post_data(name='cluster_example_A') - response = self.post_json('/clusters', bdict) - cc_mock.assert_called_once_with(mock.ANY) - # Check that 'id' is not in first arg of positional args - self.assertNotIn('id', cc_mock.call_args[0][0]) - self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) - - def test_create_cluster_generate_uuid(self): - bdict = apiutils.cluster_post_data() - del bdict['uuid'] - - response = self.post_json('/clusters', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) - - def test_create_cluster_no_cluster_template_id(self): - bdict = apiutils.cluster_post_data() - del bdict['cluster_template_id'] - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - - def test_create_cluster_with_non_existent_cluster_template_id(self): - temp_uuid = uuidutils.generate_uuid() - bdict = apiutils.cluster_post_data(cluster_template_id=temp_uuid) - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_cluster_with_non_existent_cluster_template_name(self): - modelname = 'notfound' - bdict = 
apiutils.cluster_post_data(cluster_template_id=modelname) - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_cluster_with_cluster_template_name(self): - modelname = self.cluster_template.name - bdict = apiutils.cluster_post_data(cluster_template_id=modelname) - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - - def test_create_cluster_with_node_count_zero(self): - bdict = apiutils.cluster_post_data() - bdict['node_count'] = 0 - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_cluster_with_node_count_negative(self): - bdict = apiutils.cluster_post_data() - bdict['node_count'] = -1 - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_cluster_with_no_node_count(self): - bdict = apiutils.cluster_post_data() - del bdict['node_count'] - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - - def test_create_cluster_with_master_count_zero(self): - bdict = apiutils.cluster_post_data() - bdict['master_count'] = 0 - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_cluster_with_no_master_count(self): - bdict = apiutils.cluster_post_data() - del bdict['master_count'] - response = self.post_json('/clusters', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - - def test_create_cluster_with_invalid_name(self): - invalid_names = ['x' * 243, '123456', '123456test_cluster', - '-test_cluster', '.test_cluster', '_test_cluster', ''] - for value in invalid_names: - bdict = apiutils.cluster_post_data(name=value) - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_cluster_with_valid_name(self): - valid_names = ['test_cluster123456', 'test-cluster', 'test.cluster', - 'testcluster.', 'testcluster-', 'testcluster_', - 'test.-_cluster', 'Testcluster'] - for value in valid_names: - bdict = apiutils.cluster_post_data(name=value) - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - - def test_create_cluster_without_name(self): - bdict = apiutils.cluster_post_data() - del bdict['name'] - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - - def test_create_cluster_with_timeout_none(self): - bdict = apiutils.cluster_post_data() - bdict['create_timeout'] = None 
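    # NOTE (editorial, not in the original file): an explicit None is accepted
    # here and presumably falls back to the same default as omitting the field
    # entirely; test_create_cluster_with_no_timeout below pins that default
    # at 60.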
- response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - - def test_create_cluster_with_no_timeout(self): - def _simulate_rpc_cluster_create(cluster, create_timeout): - self.assertEqual(60, create_timeout) - cluster.create() - return cluster - - self.mock_cluster_create.side_effect = _simulate_rpc_cluster_create - bdict = apiutils.cluster_post_data() - del bdict['create_timeout'] - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - - def test_create_cluster_with_timeout_negative(self): - bdict = apiutils.cluster_post_data() - bdict['create_timeout'] = -1 - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_create_cluster_with_timeout_zero(self): - bdict = apiutils.cluster_post_data() - bdict['create_timeout'] = 0 - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - - def test_create_cluster_with_invalid_flavor(self): - bdict = apiutils.cluster_post_data() - self.mock_valid_os_res.side_effect = exception.FlavorNotFound( - 'test-flavor') - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(400, response.status_int) - - def test_create_cluster_with_invalid_ext_network(self): - bdict = apiutils.cluster_post_data() - self.mock_valid_os_res.side_effect = \ - exception.ExternalNetworkNotFound('test-net') - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(400, response.status_int) - - def test_create_cluster_with_invalid_keypair(self): - bdict = apiutils.cluster_post_data() - self.mock_valid_os_res.side_effect = exception.KeyPairNotFound( - 'test-key') - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(404, response.status_int) - - def test_create_cluster_with_nonexist_image(self): - bdict = apiutils.cluster_post_data() - self.mock_valid_os_res.side_effect = exception.ImageNotFound( - 'test-img') - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(400, response.status_int) - - def test_create_cluster_with_multi_images_same_name(self): - bdict = apiutils.cluster_post_data() - self.mock_valid_os_res.side_effect = exception.Conflict('test-img') - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(409, response.status_int) - - def test_create_cluster_with_on_os_distro_image(self): - bdict = apiutils.cluster_post_data() - self.mock_valid_os_res.side_effect = \ - exception.OSDistroFieldNotFound('img') - response = self.post_json('/clusters', bdict, 
expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(400, response.status_int) - - def test_create_cluster_with_no_lb_one_node(self): - cluster_template = obj_utils.create_test_cluster_template( - self.context, name='foo', uuid='foo', master_lb_enabled=False) - bdict = apiutils.cluster_post_data( - cluster_template_id=cluster_template.name, master_count=1) - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - - def test_create_cluster_with_no_lb_multi_node(self): - cluster_template = obj_utils.create_test_cluster_template( - self.context, name='foo', uuid='foo', master_lb_enabled=False) - bdict = apiutils.cluster_post_data( - cluster_template_id=cluster_template.name, master_count=3) - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - - def test_create_cluster_with_keypair(self): - bdict = apiutils.cluster_post_data() - bdict['keypair'] = 'keypair2' - response = self.post_json('/clusters', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - cluster, timeout = self.mock_cluster_create.call_args - self.assertEqual('keypair2', cluster[0].keypair) - - def test_create_cluster_without_keypair(self): - bdict = apiutils.cluster_post_data() - response = self.post_json('/clusters', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - cluster, timeout = self.mock_cluster_create.call_args - # Verify keypair from ClusterTemplate is used - self.assertEqual('keypair1', cluster[0].keypair) - - def test_create_cluster_with_multi_keypair_same_name(self): - bdict = apiutils.cluster_post_data() - self.mock_valid_os_res.side_effect = exception.Conflict('keypair2') - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertTrue(self.mock_valid_os_res.called) - self.assertEqual(409, response.status_int) - - def test_create_cluster_with_docker_volume_size(self): - bdict = apiutils.cluster_post_data() - bdict['docker_volume_size'] = 3 - response = self.post_json('/clusters', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - cluster, timeout = self.mock_cluster_create.call_args - self.assertEqual(3, cluster[0].docker_volume_size) - - def test_create_cluster_without_docker_volume_size(self): - bdict = apiutils.cluster_post_data() - # Remove the default docker_volume_size from the cluster dict. 
- del bdict['docker_volume_size'] - response = self.post_json('/clusters', bdict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - cluster, timeout = self.mock_cluster_create.call_args - # Verify docker_volume_size from ClusterTemplate is used - self.assertEqual(20, cluster[0].docker_volume_size) - - def test_create_cluster_with_invalid_docker_volume_size(self): - invalid_values = [(-1, None), ('notanint', None), - (1, 'devicemapper'), (2, 'devicemapper')] - for value in invalid_values: - bdict = apiutils.cluster_post_data(docker_volume_size=value[0], - docker_storage_driver=value[1]) - response = self.post_json('/clusters', bdict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - -class TestDelete(api_base.FunctionalTest): - def setUp(self): - super(TestDelete, self).setUp() - self.cluster_template = obj_utils.create_test_cluster_template( - self.context) - self.cluster = obj_utils.create_test_cluster(self.context) - p = mock.patch.object(rpcapi.API, 'cluster_delete_async') - self.mock_cluster_delete = p.start() - self.mock_cluster_delete.side_effect = self._simulate_cluster_delete - self.addCleanup(p.stop) - - def _simulate_cluster_delete(self, cluster_uuid): - cluster = objects.Cluster.get_by_uuid(self.context, cluster_uuid) - cluster.destroy() - - def test_delete_cluster(self): - self.delete('/clusters/%s' % self.cluster.uuid) - response = self.get_json('/clusters/%s' % self.cluster.uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_cluster_not_found(self): - uuid = uuidutils.generate_uuid() - response = self.delete('/clusters/%s' % uuid, expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_cluster_with_name_not_found(self): - response = self.delete('/clusters/not_found', expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_cluster_with_name(self): - response = self.delete('/clusters/%s' % self.cluster.name, - expect_errors=True) - self.assertEqual(204, response.status_int) - - def test_delete_multiple_cluster_by_name(self): - obj_utils.create_test_cluster(self.context, name='test_cluster', - uuid=uuidutils.generate_uuid()) - obj_utils.create_test_cluster(self.context, name='test_cluster', - uuid=uuidutils.generate_uuid()) - response = self.delete('/clusters/test_cluster', expect_errors=True) - self.assertEqual(409, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - -class TestClusterPolicyEnforcement(api_base.FunctionalTest): - def setUp(self): - super(TestClusterPolicyEnforcement, self).setUp() - obj_utils.create_test_cluster_template(self.context) - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({rule: "project:non_fake"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." 
% rule, - response.json['errors'][0]['detail']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "cluster:get_all", self.get_json, '/clusters', expect_errors=True) - - def test_policy_disallow_get_one(self): - self.cluster = obj_utils.create_test_cluster(self.context) - self._common_policy_check( - "cluster:get", self.get_json, '/clusters/%s' % self.cluster.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "cluster:detail", self.get_json, - '/clusters/%s/detail' % uuidutils.generate_uuid(), - expect_errors=True) - - def test_policy_disallow_update(self): - self.cluster = obj_utils.create_test_cluster(self.context, - name='cluster_example_A', - node_count=3) - self._common_policy_check( - "cluster:update", self.patch_json, '/clusters/%s' % - self.cluster.name, - [{'path': '/name', 'value': "new_name", 'op': 'replace'}], - expect_errors=True) - - def test_policy_disallow_create(self): - bdict = apiutils.cluster_post_data(name='cluster_example_A') - self._common_policy_check( - "cluster:create", self.post_json, '/clusters', bdict, - expect_errors=True) - - def _simulate_cluster_delete(self, cluster_uuid): - cluster = objects.Cluster.get_by_uuid(self.context, cluster_uuid) - cluster.destroy() - - def test_policy_disallow_delete(self): - p = mock.patch.object(rpcapi.API, 'cluster_delete') - self.mock_cluster_delete = p.start() - self.mock_cluster_delete.side_effect = self._simulate_cluster_delete - self.addCleanup(p.stop) - self.cluster = obj_utils.create_test_cluster(self.context) - self._common_policy_check( - "cluster:delete", self.delete, '/clusters/%s' % - self.cluster.uuid, - expect_errors=True) - - def _owner_check(self, rule, func, *args, **kwargs): - self.policy.set_rules({rule: "user_id:%(user_id)s"}) - response = func(*args, **kwargs) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." % rule, - response.json['errors'][0]['detail']) - - def test_policy_only_owner_get_one(self): - cluster = obj_utils.create_test_cluster(self.context, - user_id='another') - self._owner_check("cluster:get", self.get_json, - '/clusters/%s' % cluster.uuid, - expect_errors=True) - - def test_policy_only_owner_update(self): - cluster = obj_utils.create_test_cluster(self.context, - user_id='another') - self._owner_check( - "cluster:update", self.patch_json, - '/clusters/%s' % cluster.uuid, - [{'path': '/name', 'value': "new_name", 'op': 'replace'}], - expect_errors=True) - - def test_policy_only_owner_delete(self): - cluster = obj_utils.create_test_cluster(self.context, - user_id='another') - self._owner_check("cluster:delete", self.delete, - '/clusters/%s' % cluster.uuid, - expect_errors=True) diff --git a/magnum/tests/unit/api/controllers/v1/test_cluster_template.py b/magnum/tests/unit/api/controllers/v1/test_cluster_template.py deleted file mode 100644 index 1a3c6e99..00000000 --- a/magnum/tests/unit/api/controllers/v1/test_cluster_template.py +++ /dev/null @@ -1,1129 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime - -import mock -from oslo_config import cfg -from oslo_utils import timeutils -from oslo_utils import uuidutils -from six.moves.urllib import parse as urlparse -from webtest.app import AppError -from wsme import types as wtypes - -from magnum.api import attr_validator -from magnum.api.controllers.v1 import cluster_template as api_cluster_template -from magnum.common import exception -from magnum.common import policy as magnum_policy -from magnum.tests import base -from magnum.tests.unit.api import base as api_base -from magnum.tests.unit.api import utils as apiutils -from magnum.tests.unit.objects import utils as obj_utils - - -class TestClusterTemplateObject(base.TestCase): - - def test_cluster_template_init(self): - cluster_template_dict = apiutils.cluster_template_post_data() - del cluster_template_dict['image_id'] - del cluster_template_dict['registry_enabled'] - del cluster_template_dict['tls_disabled'] - del cluster_template_dict['public'] - del cluster_template_dict['server_type'] - del cluster_template_dict['master_lb_enabled'] - del cluster_template_dict['floating_ip_enabled'] - cluster_template = api_cluster_template.ClusterTemplate( - **cluster_template_dict) - self.assertEqual(wtypes.Unset, cluster_template.image_id) - self.assertFalse(cluster_template.registry_enabled) - self.assertFalse(cluster_template.tls_disabled) - self.assertFalse(cluster_template.public) - self.assertEqual('vm', cluster_template.server_type) - self.assertFalse(cluster_template.master_lb_enabled) - self.assertTrue(cluster_template.floating_ip_enabled) - - -class TestListClusterTemplate(api_base.FunctionalTest): - - _cluster_template_attrs = ('name', 'apiserver_port', 'network_driver', - 'coe', 'flavor_id', 'fixed_network', - 'dns_nameserver', 'http_proxy', - 'docker_volume_size', 'server_type', - 'cluster_distro', 'external_network_id', - 'image_id', 'registry_enabled', 'no_proxy', - 'keypair_id', 'https_proxy', 'tls_disabled', - 'public', 'labels', 'master_flavor_id', - 'volume_driver', 'insecure_registry') - - def test_empty(self): - response = self.get_json('/clustertemplates') - self.assertEqual([], response['clustertemplates']) - - def test_one(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - response = self.get_json('/clustertemplates') - self.assertEqual(cluster_template.uuid, - response['clustertemplates'][0]["uuid"]) - self._verify_attrs(self._cluster_template_attrs, - response['clustertemplates'][0]) - - def test_get_one(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - response = self.get_json('/clustertemplates/%s' % - cluster_template['uuid']) - self.assertEqual(cluster_template.uuid, response['uuid']) - self._verify_attrs(self._cluster_template_attrs, response) - - def test_get_one_by_name(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - response = self.get_json('/clustertemplates/%s' % - cluster_template['name']) - self.assertEqual(cluster_template.uuid, response['uuid']) - self._verify_attrs(self._cluster_template_attrs, response) - - def 
test_get_one_by_name_not_found(self): - response = self.get_json( - '/clustertemplates/not_found', - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_one_by_uuid(self): - temp_uuid = uuidutils.generate_uuid() - obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid) - response = self.get_json( - '/clustertemplates/%s' % temp_uuid) - self.assertEqual(temp_uuid, response['uuid']) - - def test_get_one_by_uuid_not_found(self): - temp_uuid = uuidutils.generate_uuid() - response = self.get_json( - '/clustertemplates/%s' % temp_uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_one_by_name_multiple_cluster_template(self): - obj_utils.create_test_cluster_template( - self.context, name='test_clustertemplate', - uuid=uuidutils.generate_uuid()) - obj_utils.create_test_cluster_template( - self.context, name='test_clustertemplate', - uuid=uuidutils.generate_uuid()) - response = self.get_json( - '/clustertemplates/test_clustertemplate', - expect_errors=True) - self.assertEqual(409, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_all_with_pagination_marker(self): - bm_list = [] - for id_ in range(4): - cluster_template = obj_utils.create_test_cluster_template( - self.context, id=id_, - uuid=uuidutils.generate_uuid()) - bm_list.append(cluster_template) - - response = self.get_json('/clustertemplates?limit=3&marker=%s' - % bm_list[2].uuid) - self.assertEqual(1, len(response['clustertemplates'])) - self.assertEqual(bm_list[-1].uuid, - response['clustertemplates'][0]['uuid']) - - def test_detail(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - response = self.get_json('/clustertemplates/detail') - self.assertEqual(cluster_template.uuid, - response['clustertemplates'][0]["uuid"]) - self._verify_attrs(self._cluster_template_attrs, - response['clustertemplates'][0]) - - def test_detail_with_pagination_marker(self): - bm_list = [] - for id_ in range(4): - cluster_template = obj_utils.create_test_cluster_template( - self.context, id=id_, - uuid=uuidutils.generate_uuid()) - bm_list.append(cluster_template) - - response = self.get_json('/clustertemplates/detail?limit=3&marker=%s' - % bm_list[2].uuid) - self.assertEqual(1, len(response['clustertemplates'])) - self.assertEqual(bm_list[-1].uuid, - response['clustertemplates'][0]['uuid']) - self._verify_attrs(self._cluster_template_attrs, - response['clustertemplates'][0]) - - def test_detail_against_single(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - response = self.get_json('/clustertemplates/%s/detail' % - cluster_template['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - bm_list = [] - for id_ in range(5): - cluster_template = obj_utils.create_test_cluster_template( - self.context, id=id_, - uuid=uuidutils.generate_uuid()) - bm_list.append(cluster_template.uuid) - response = self.get_json('/clustertemplates') - self.assertEqual(len(bm_list), len(response['clustertemplates'])) - uuids = [bm['uuid'] for bm in response['clustertemplates']] - self.assertEqual(sorted(bm_list), sorted(uuids)) - - def test_links(self): - uuid = uuidutils.generate_uuid() - 
obj_utils.create_test_cluster_template(self.context, id=1, uuid=uuid) - response = self.get_json('/clustertemplates/%s' % uuid) - self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], - bookmark=bookmark)) - - def test_collection_links(self): - for id_ in range(5): - obj_utils.create_test_cluster_template( - self.context, id=id_, uuid=uuidutils.generate_uuid()) - response = self.get_json('/clustertemplates/?limit=3') - self.assertEqual(3, len(response['clustertemplates'])) - - next_marker = response['clustertemplates'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_collection_links_default_limit(self): - cfg.CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - obj_utils.create_test_cluster_template( - self.context, id=id_, uuid=uuidutils.generate_uuid()) - response = self.get_json('/clustertemplates') - self.assertEqual(3, len(response['clustertemplates'])) - - next_marker = response['clustertemplates'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - -class TestPatch(api_base.FunctionalTest): - - def setUp(self): - super(TestPatch, self).setUp() - p = mock.patch.object(attr_validator, 'validate_os_resources') - self.mock_valid_os_res = p.start() - self.addCleanup(p.stop) - self.cluster_template = obj_utils.create_test_cluster_template( - self.context, - name='cluster_model_example_A', - image_id='nerdherd', - apiserver_port=8080, - fixed_network='private', - flavor_id='m1.magnum', - master_flavor_id='m1.magnum', - external_network_id='public', - keypair_id='test', - volume_driver='rexray', - public=False, - docker_volume_size=20, - coe='swarm', - labels={'key1': 'val1', 'key2': 'val2'} - ) - - def test_update_not_found(self): - uuid = uuidutils.generate_uuid() - response = self.patch_json('/clustertemplates/%s' % uuid, - [{'path': '/name', - 'value': 'cluster_model_example_B', - 'op': 'add'}], - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_update_cluster_template_with_cluster(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - obj_utils.create_test_cluster( - self.context, cluster_template_id=cluster_template.uuid) - - response = self.patch_json('/clustertemplates/%s' % - cluster_template.uuid, - [{'path': '/name', - 'value': 'cluster_model_example_B', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - self.assertIn(cluster_template.uuid, - response.json['errors'][0]['detail']) - - @mock.patch.object(magnum_policy, 'enforce') - def test_update_public_cluster_template_success(self, mock_policy): - mock_policy.return_value = True - response = self.patch_json('/clustertemplates/%s' % - self.cluster_template.uuid, - [{'path': '/public', 'value': True, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/clustertemplates/%s' % - self.cluster_template.uuid) - self.assertTrue(response['public']) - - @mock.patch.object(magnum_policy, 'enforce') - def test_update_public_cluster_template_fail(self, mock_policy): - mock_policy.return_value = 
False - self.assertRaises(AppError, self.patch_json, - '/clustertemplates/%s' % self.cluster_template.uuid, - [{'path': '/public', 'value': True, - 'op': 'replace'}]) - - def test_update_cluster_template_with_cluster_allow_update(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - obj_utils.create_test_cluster( - self.context, cluster_template_id=cluster_template.uuid) - response = self.patch_json('/clustertemplates/%s' % - cluster_template.uuid, - [{'path': '/public', - 'value': True, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(200, response.status_int) - response = self.get_json('/clustertemplates/%s' % - self.cluster_template.uuid) - self.assertEqual(response['public'], True) - - def test_update_cluster_template_with_cluster_not_allow_update(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - obj_utils.create_test_cluster( - self.context, cluster_template_id=cluster_template.uuid) - response = self.patch_json('/clustertemplates/%s' % - cluster_template.uuid, - [{'path': '/name', - 'value': 'new_name', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(400, response.status_code) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_singular(self, mock_utcnow): - name = 'cluster_model_example_B' - test_time = datetime.datetime(2000, 1, 1, 0, 0) - - mock_utcnow.return_value = test_time - response = self.patch_json('/clustertemplates/%s' % - self.cluster_template.uuid, - [{'path': '/name', 'value': name, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/clustertemplates/%s' % - self.cluster_template.uuid) - self.assertEqual(name, response['name']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - # Assert nothing else was changed - self.assertEqual(self.cluster_template.uuid, response['uuid']) - self.assertEqual(self.cluster_template.image_id, response['image_id']) - self.assertEqual(self.cluster_template.apiserver_port, - response['apiserver_port']) - self.assertEqual(self.cluster_template.fixed_network, - response['fixed_network']) - self.assertEqual(self.cluster_template.network_driver, - response['network_driver']) - self.assertEqual(self.cluster_template.volume_driver, - response['volume_driver']) - self.assertEqual(self.cluster_template.docker_volume_size, - response['docker_volume_size']) - self.assertEqual(self.cluster_template.coe, - response['coe']) - self.assertEqual(self.cluster_template.http_proxy, - response['http_proxy']) - self.assertEqual(self.cluster_template.https_proxy, - response['https_proxy']) - self.assertEqual(self.cluster_template.no_proxy, - response['no_proxy']) - self.assertEqual(self.cluster_template.labels, - response['labels']) - - def test_replace_cluster_template_with_no_exist_flavor_id(self): - self.mock_valid_os_res.side_effect = exception.FlavorNotFound("aaa") - response = self.patch_json('/clustertemplates/%s' % - self.cluster_template.uuid, - [{'path': '/flavor_id', 'value': 'aaa', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_replace_cluster_template_with_no_exist_keypair_id(self): - self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("aaa") - response = 
self.patch_json('/clustertemplates/%s' % - self.cluster_template.uuid, - [{'path': '/keypair_id', 'value': 'aaa', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(404, response.status_code) - self.assertTrue(response.json['errors']) - - def test_replace_cluster_template_with_no_exist_external_network_id(self): - self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound( - "aaa") - response = self.patch_json('/clustertemplates/%s' % - self.cluster_template.uuid, - [{'path': '/external_network_id', - 'value': 'aaa', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_replace_cluster_template_with_no_exist_image_id(self): - self.mock_valid_os_res.side_effect = exception.ImageNotFound("aaa") - response = self.patch_json('/clustertemplates/%s' % - self.cluster_template.uuid, - [{'path': '/image_id', 'value': 'aaa', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_create_cluster_template_with_no_os_distro_image(self): - image_exce = exception.OSDistroFieldNotFound('img') - self.mock_valid_os_res.side_effect = image_exce - response = self.patch_json('/clustertemplates/%s' % - self.cluster_template.uuid, - [{'path': '/image_id', 'value': 'img', - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_remove_singular(self): - response = self.get_json('/clustertemplates/%s' % - self.cluster_template.uuid) - self.assertIsNotNone(response['dns_nameserver']) - - response = self.patch_json('/clustertemplates/%s' % - self.cluster_template.uuid, - [{'path': '/dns_nameserver', - 'op': 'remove'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/clustertemplates/%s' % - self.cluster_template.uuid) - self.assertIsNone(response['dns_nameserver']) - # Assert nothing else was changed - self.assertEqual(self.cluster_template.uuid, response['uuid']) - self.assertEqual(self.cluster_template.name, response['name']) - self.assertEqual(self.cluster_template.apiserver_port, - response['apiserver_port']) - self.assertEqual(self.cluster_template.image_id, - response['image_id']) - self.assertEqual(self.cluster_template.fixed_network, - response['fixed_network']) - self.assertEqual(self.cluster_template.network_driver, - response['network_driver']) - self.assertEqual(self.cluster_template.volume_driver, - response['volume_driver']) - self.assertEqual(self.cluster_template.docker_volume_size, - response['docker_volume_size']) - self.assertEqual(self.cluster_template.coe, response['coe']) - self.assertEqual(self.cluster_template.http_proxy, - response['http_proxy']) - self.assertEqual(self.cluster_template.https_proxy, - response['https_proxy']) - self.assertEqual(self.cluster_template.no_proxy, response['no_proxy']) - self.assertEqual(self.cluster_template.labels, response['labels']) - - def test_remove_non_existent_property_fail(self): - response = self.patch_json('/clustertemplates/%s' % - self.cluster_template.uuid, - [{'path': '/non-existent', - 'op': 'remove'}], - expect_errors=True) - 
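# A remove op on a path the ClusterTemplate schema does not define must fail: expect HTTP 400 with a populated 'errors' payload, as asserted next.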
self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_remove_mandatory_property_fail(self): - mandatory_properties = ('/image_id', '/coe', - '/external_network_id', '/server_type', - '/tls_disabled', '/public', - '/registry_enabled', - '/cluster_distro', '/network_driver') - for p in mandatory_properties: - response = self.patch_json('/clustertemplates/%s' % - self.cluster_template.uuid, - [{'path': p, 'op': 'remove'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['errors']) - - def test_add_root_non_existent(self): - response = self.patch_json( - '/clustertemplates/%s' % self.cluster_template.uuid, - [{'path': '/foo', 'value': 'bar', 'op': 'add'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - def test_remove_uuid(self): - response = self.patch_json('/clustertemplates/%s' % - self.cluster_template.uuid, - [{'path': '/uuid', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - -class TestPost(api_base.FunctionalTest): - - def setUp(self): - super(TestPost, self).setUp() - p = mock.patch.object(attr_validator, 'validate_os_resources') - self.mock_valid_os_res = p.start() - self.addCleanup(p.stop) - - @mock.patch('magnum.api.attr_validator.validate_image') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_create_cluster_template(self, mock_utcnow, - mock_image_data): - bdict = apiutils.cluster_template_post_data() - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - - response = self.post_json('/clustertemplates', bdict) - self.assertEqual(201, response.status_int) - # Check location header - self.assertIsNotNone(response.location) - expected_location = '/v1/clustertemplates/%s' % bdict['uuid'] - self.assertEqual(expected_location, - urlparse.urlparse(response.location).path) - self.assertEqual(bdict['uuid'], response.json['uuid']) - self.assertNotIn('updated_at', response.json.keys()) - return_created_at = timeutils.parse_isotime( - response.json['created_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_created_at) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_set_project_id_and_user_id( - self, mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data() - self.post_json('/clustertemplates', bdict) - cc_mock.assert_called_once_with(mock.ANY) - self.assertEqual(self.context.project_id, - cc_mock.call_args[0][0]['project_id']) - self.assertEqual(self.context.user_id, - cc_mock.call_args[0][0]['user_id']) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_doesnt_contain_id(self, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 
'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data(image_id='my-image') - response = self.post_json('/clustertemplates', bdict) - self.assertEqual(bdict['image_id'], response.json['image_id']) - cc_mock.assert_called_once_with(mock.ANY) - # Check that 'id' is not in first arg of positional args - self.assertNotIn('id', cc_mock.call_args[0][0]) - - def _create_model_raises_app_error(self, **kwargs): - # Create mock for db and image data - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock,\ - mock.patch('magnum.api.attr_validator.validate_image')\ - as mock_image_data: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data(**kwargs) - self.assertRaises(AppError, self.post_json, '/clustertemplates', - bdict) - self.assertFalse(cc_mock.called) - - def test_create_cluster_template_with_invalid_long_string(self): - fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id", - "dns_nameserver", "keypair_id", "external_network_id", - "cluster_distro", "fixed_network", "apiserver_port", - "docker_volume_size", "http_proxy", "https_proxy", - "no_proxy", "network_driver", "labels", "volume_driver"] - for field in fields: - self._create_model_raises_app_error(**{field: 'i' * 256}) - - def test_create_cluster_template_with_invalid_empty_string(self): - fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id", - "dns_nameserver", "keypair_id", "external_network_id", - "cluster_distro", "fixed_network", "apiserver_port", - "docker_volume_size", "labels", "http_proxy", "https_proxy", - "no_proxy", "network_driver", "volume_driver", "coe"] - for field in fields: - self._create_model_raises_app_error(**{field: ''}) - - def test_create_cluster_template_with_invalid_coe(self): - self._create_model_raises_app_error(coe='k8s') - self._create_model_raises_app_error(coe='storm') - self._create_model_raises_app_error(coe='meson') - self._create_model_raises_app_error(coe='osomatsu') - - def test_create_cluster_template_with_invalid_docker_volume_size(self): - self._create_model_raises_app_error(docker_volume_size=-1) - self._create_model_raises_app_error( - docker_volume_size=1, - docker_storage_driver="devicemapper") - self._create_model_raises_app_error( - docker_volume_size=2, - docker_storage_driver="devicemapper") - self._create_model_raises_app_error(docker_volume_size='notanint') - - def test_create_cluster_template_with_invalid_dns_nameserver(self): - self._create_model_raises_app_error(dns_nameserver='1.1.2') - self._create_model_raises_app_error(dns_nameserver='1.1..1') - self._create_model_raises_app_error(dns_nameserver='openstack.org') - - def test_create_cluster_template_with_invalid_apiserver_port(self): - self._create_model_raises_app_error(apiserver_port=-12) - self._create_model_raises_app_error(apiserver_port=65536) - self._create_model_raises_app_error(apiserver_port=0) - self._create_model_raises_app_error(apiserver_port=1023) - self._create_model_raises_app_error(apiserver_port='not an int') - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_labels(self, mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data( - 
labels={'key1': 'val1', 'key2': 'val2'}) - response = self.post_json('/clustertemplates', bdict) - self.assertEqual(bdict['labels'], - response.json['labels']) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_docker_volume_size(self, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data(docker_volume_size=99) - response = self.post_json('/clustertemplates', bdict) - self.assertEqual(bdict['docker_volume_size'], - response.json['docker_volume_size']) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_overlay(self, mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data( - docker_volume_size=1, docker_storage_driver="overlay") - response = self.post_json('/clustertemplates', bdict) - self.assertEqual(bdict['docker_volume_size'], - response.json['docker_volume_size']) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - - @mock.patch('magnum.api.attr_validator.validate_image') - def _test_create_cluster_template_network_driver_attr( - self, - cluster_template_dict, - cluster_template_config_dict, - expect_errors, - mock_image_data): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - for k, v in cluster_template_config_dict.items(): - cfg.CONF.set_override(k, v, 'cluster_template') - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - - bdict = apiutils.cluster_template_post_data( - **cluster_template_dict) - response = self.post_json('/clustertemplates', bdict, - expect_errors=expect_errors) - if expect_errors: - self.assertEqual(400, response.status_int) - else: - expected_driver = bdict.get('network_driver') - if not expected_driver: - expected_driver = ( - cfg.CONF.cluster_template.swarm_default_network_driver) - self.assertEqual(expected_driver, - response.json['network_driver']) - self.assertEqual(bdict['image_id'], - response.json['image_id']) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) - - def test_create_cluster_template_with_network_driver(self): - cluster_template_dict = {'coe': 'kubernetes', - 'network_driver': 'flannel'} - config_dict = {} # Default config - expect_errors_flag = False - self._test_create_cluster_template_network_driver_attr( - cluster_template_dict, - config_dict, - expect_errors_flag) - - def test_create_cluster_template_with_no_network_driver(self): - cluster_template_dict = {} - config_dict = {} - expect_errors_flag = False - self._test_create_cluster_template_network_driver_attr( - cluster_template_dict, - config_dict, - expect_errors_flag) - - def test_create_cluster_template_with_network_driver_non_def_config(self): - cluster_template_dict = {'coe': 'kubernetes', - 'network_driver': 
'flannel'} - config_dict = { - 'kubernetes_allowed_network_drivers': ['flannel', 'foo']} - expect_errors_flag = False - self._test_create_cluster_template_network_driver_attr( - cluster_template_dict, - config_dict, - expect_errors_flag) - - def test_create_cluster_template_with_invalid_network_driver(self): - cluster_template_dict = {'coe': 'kubernetes', - 'network_driver': 'bad_driver'} - config_dict = { - 'kubernetes_allowed_network_drivers': ['flannel', 'good_driver']} - expect_errors_flag = True - self._test_create_cluster_template_network_driver_attr( - cluster_template_dict, - config_dict, - expect_errors_flag) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_volume_driver(self, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data(volume_driver='rexray') - response = self.post_json('/clustertemplates', bdict) - self.assertEqual(bdict['volume_driver'], - response.json['volume_driver']) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_no_volume_driver(self, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data() - response = self.post_json('/clustertemplates', bdict) - self.assertEqual(bdict['volume_driver'], - response.json['volume_driver']) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - - @mock.patch('magnum.api.attr_validator.validate_image') - @mock.patch.object(magnum_policy, 'enforce') - def test_create_cluster_template_public_success(self, mock_policy, - mock_image_data): - with mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_policy.return_value = True - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data(public=True) - response = self.post_json('/clustertemplates', bdict) - self.assertTrue(response.json['public']) - mock_policy.assert_called_with(mock.ANY, - "clustertemplate:publish", - None, do_raise=False) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - self.assertTrue(cc_mock.call_args[0][0]['public']) - - @mock.patch('magnum.api.attr_validator.validate_image') - @mock.patch.object(magnum_policy, 'enforce') - def test_create_cluster_template_public_fail(self, mock_policy, - mock_image_data): - with mock.patch.object(self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template): - # make policy enforcement fail - mock_policy.return_value = False - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data(public=True) - self.assertRaises(AppError, self.post_json, '/clustertemplates', - bdict) - - @mock.patch('magnum.api.attr_validator.validate_image') - @mock.patch.object(magnum_policy, 'enforce') - def test_create_cluster_template_public_not_set(self, mock_policy, - mock_image_data): - with 
mock.patch.object( - self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template) as cc_mock: - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data(public=False) - response = self.post_json('/clustertemplates', bdict) - self.assertFalse(response.json['public']) - # policy enforcement is called only once for enforce_wsgi - self.assertEqual(1, mock_policy.call_count) - cc_mock.assert_called_once_with(mock.ANY) - self.assertNotIn('id', cc_mock.call_args[0][0]) - self.assertFalse(cc_mock.call_args[0][0]['public']) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_no_os_distro_image(self, - mock_image_data): - mock_image_data.side_effect = exception.OSDistroFieldNotFound('img') - bdict = apiutils.cluster_template_post_data() - del bdict['uuid'] - response = self.post_json('/clustertemplates', bdict, - expect_errors=True) - self.assertEqual(400, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_os_distro_image(self, - mock_image_data): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data() - del bdict['uuid'] - response = self.post_json('/clustertemplates', bdict, - expect_errors=True) - self.assertEqual(201, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_image_name(self, - mock_image_data): - mock_image = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - mock_image_data.return_value = mock_image - bdict = apiutils.cluster_template_post_data() - del bdict['uuid'] - response = self.post_json('/clustertemplates', bdict, - expect_errors=True) - self.assertEqual(201, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_no_exist_image_name(self, - mock_image_data): - mock_image_data.side_effect = exception.ResourceNotFound('test-img') - bdict = apiutils.cluster_template_post_data() - del bdict['uuid'] - response = self.post_json('/clustertemplates', bdict, - expect_errors=True) - self.assertEqual(404, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_multi_image_name(self, - mock_image_data): - mock_image_data.side_effect = exception.Conflict('Multiple images') - bdict = apiutils.cluster_template_post_data() - del bdict['uuid'] - response = self.post_json('/clustertemplates', bdict, - expect_errors=True) - self.assertEqual(409, response.status_int) - - def test_create_cluster_template_without_image_id(self): - bdict = apiutils.cluster_template_post_data() - del bdict['image_id'] - response = self.post_json('/clustertemplates', bdict, - expect_errors=True) - self.assertEqual(400, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_without_keypair_id(self, - mock_image_data): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data() - del bdict['keypair_id'] - response = self.post_json('/clustertemplates', bdict) - self.assertEqual(201, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_dns(self, - mock_image_data): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 
'fedora-atomic'} - bdict = apiutils.cluster_template_post_data() - response = self.post_json('/clustertemplates', bdict) - self.assertEqual(201, response.status_int) - self.assertEqual(bdict['dns_nameserver'], - response.json['dns_nameserver']) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_no_exist_keypair(self, - mock_image_data): - self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("Test") - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data() - response = self.post_json('/clustertemplates', bdict, - expect_errors=True) - self.assertEqual(404, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_flavor(self, - mock_image_data): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data() - response = self.post_json('/clustertemplates', bdict) - self.assertEqual(201, response.status_int) - self.assertEqual(bdict['flavor_id'], - response.json['flavor_id']) - self.assertEqual(bdict['master_flavor_id'], - response.json['master_flavor_id']) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_no_exist_flavor(self, - mock_image_data): - self.mock_valid_os_res.side_effect = exception.FlavorNotFound("flavor") - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data() - response = self.post_json('/clustertemplates', bdict, - expect_errors=True) - self.assertEqual(400, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_with_external_network(self, - mock_image_data): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data() - response = self.post_json('/clustertemplates', bdict) - self.assertEqual(201, response.status_int) - self.assertEqual(bdict['external_network_id'], - response.json['external_network_id']) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_no_exist_external_network( - self, mock_image_data): - self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound( - "test") - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data() - response = self.post_json('/clustertemplates', bdict, - expect_errors=True) - self.assertEqual(400, response.status_int) - - @mock.patch('magnum.api.attr_validator.validate_image') - def test_create_cluster_template_without_name(self, mock_image_data): - with mock.patch.object(self.dbapi, 'create_cluster_template', - wraps=self.dbapi.create_cluster_template): - mock_image_data.return_value = {'name': 'mock_name', - 'os_distro': 'fedora-atomic'} - bdict = apiutils.cluster_template_post_data() - bdict.pop('name') - resp = self.post_json('/clustertemplates', bdict) - self.assertEqual(201, resp.status_int) - self.assertIsNotNone(resp.json['name']) - - -class TestDelete(api_base.FunctionalTest): - - def test_delete_cluster_template(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - self.delete('/clustertemplates/%s' % cluster_template.uuid) - response = self.get_json('/clustertemplates/%s' % - cluster_template.uuid, - expect_errors=True) - self.assertEqual(404, 
response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_cluster_template_with_cluster(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - obj_utils.create_test_cluster( - self.context, cluster_template_id=cluster_template.uuid) - response = self.delete('/clustertemplates/%s' % cluster_template.uuid, - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - self.assertIn(cluster_template.uuid, - response.json['errors'][0]['detail']) - - def test_delete_cluster_template_not_found(self): - uuid = uuidutils.generate_uuid() - response = self.delete('/clustertemplates/%s' % uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_cluster_template_with_name(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - response = self.delete('/clustertemplates/%s' % - cluster_template['name'], - expect_errors=True) - self.assertEqual(204, response.status_int) - - def test_delete_cluster_template_with_name_not_found(self): - response = self.delete('/clustertemplates/not_found', - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_delete_multiple_cluster_template_by_name(self): - obj_utils.create_test_cluster_template(self.context, - name='test_cluster_template', - uuid=uuidutils.generate_uuid()) - obj_utils.create_test_cluster_template(self.context, - name='test_cluster_template', - uuid=uuidutils.generate_uuid()) - response = self.delete('/clustertemplates/test_cluster_template', - expect_errors=True) - self.assertEqual(409, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - -class TestClusterTemplatePolicyEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({rule: "project:non_fake"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertIn( - "Policy doesn't allow %s to be performed." 
% rule, - response.json['errors'][0]['detail']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "cluster_template:get_all", self.get_json, '/clustertemplates', - expect_errors=True) - - def test_policy_disallow_get_one(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - self._common_policy_check( - "cluster_template:get", self.get_json, - '/clustertemplates/%s' % cluster_template.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "cluster_template:detail", self.get_json, - '/clustertemplates/%s/detail' % uuidutils.generate_uuid(), - expect_errors=True) - - def test_policy_disallow_update(self): - cluster_template = obj_utils.create_test_cluster_template( - self.context, - name='example_A', - uuid=uuidutils.generate_uuid()) - self._common_policy_check( - "cluster_template:update", self.patch_json, - '/clustertemplates/%s' % cluster_template.name, - [{'path': '/name', 'value': "new_name", 'op': 'replace'}], - expect_errors=True) - - def test_policy_disallow_create(self): - bdict = apiutils.cluster_template_post_data( - name='cluster_model_example_A') - self._common_policy_check( - "cluster_template:create", self.post_json, '/clustertemplates', - bdict, expect_errors=True) - - def test_policy_disallow_delete(self): - cluster_template = obj_utils.create_test_cluster_template(self.context) - self._common_policy_check( - "cluster_template:delete", self.delete, - '/clustertemplates/%s' % cluster_template.uuid, expect_errors=True) - - def _owner_check(self, rule, func, *args, **kwargs): - self.policy.set_rules({rule: "user_id:%(user_id)s"}) - response = func(*args, **kwargs) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertIn( - "Policy doesn't allow %s to be performed." % rule, - response.json['errors'][0]['detail']) - - def test_policy_only_owner_get_one(self): - cluster_template = obj_utils.create_test_cluster_template( - self.context, - user_id='another') - self._owner_check("cluster_template:get", self.get_json, - '/clustertemplates/%s' % cluster_template.uuid, - expect_errors=True) - - def test_policy_only_owner_update(self): - cluster_template = obj_utils.create_test_cluster_template( - self.context, - user_id='another') - self._owner_check( - "cluster_template:update", self.patch_json, - '/clustertemplates/%s' % cluster_template.uuid, - [{'path': '/name', 'value': "new_name", 'op': 'replace'}], - expect_errors=True) - - def test_policy_only_owner_delete(self): - cluster_template = obj_utils.create_test_cluster_template( - self.context, - user_id='another') - self._owner_check( - "cluster_template:delete", self.delete, - '/clustertemplates/%s' % cluster_template.uuid, - expect_errors=True) diff --git a/magnum/tests/unit/api/controllers/v1/test_magnum_service.py b/magnum/tests/unit/api/controllers/v1/test_magnum_service.py deleted file mode 100644 index d6ca6843..00000000 --- a/magnum/tests/unit/api/controllers/v1/test_magnum_service.py +++ /dev/null @@ -1,98 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from magnum.api.controllers.v1 import magnum_services as mservice -from magnum.api import servicegroup as servicegroup -from magnum import objects -from magnum.tests import base -from magnum.tests.unit.api import base as api_base -from magnum.tests.unit.api import utils as apiutils - - -class TestMagnumServiceObject(base.TestCase): - - def setUp(self): - super(TestMagnumServiceObject, self).setUp() - self.rpc_dict = apiutils.mservice_get_data() - - def test_msvc_obj_fields_filtering(self): - """Test that unexpected fields are filtered out.""" - self.rpc_dict['fake-key'] = 'fake-value' - msvco = mservice.MagnumService("up", **self.rpc_dict) - self.assertNotIn('fake-key', msvco.fields) - - -class db_rec(object): - - def __init__(self, d): - self.rec_as_dict = d - - def as_dict(self): - return self.rec_as_dict - - -class TestMagnumServiceController(api_base.FunctionalTest): - - def test_empty(self): - response = self.get_json('/mservices') - self.assertEqual([], response['mservices']) - - def _rpc_api_reply(self, count=1): - reclist = [] - for i in range(count): - elem = apiutils.mservice_get_data() - elem['id'] = i + 1 - rec = db_rec(elem) - reclist.append(rec) - return reclist - - @mock.patch.object(objects.MagnumService, 'list') - @mock.patch.object(servicegroup.ServiceGroup, 'service_is_up') - def test_get_one(self, svc_up, rpc_patcher): - rpc_patcher.return_value = self._rpc_api_reply() - svc_up.return_value = "up" - - response = self.get_json('/mservices') - self.assertEqual(1, len(response['mservices'])) - self.assertEqual(1, response['mservices'][0]['id']) - - @mock.patch.object(objects.MagnumService, 'list') - @mock.patch.object(servicegroup.ServiceGroup, 'service_is_up') - def test_get_many(self, svc_up, rpc_patcher): - svc_num = 5 - rpc_patcher.return_value = self._rpc_api_reply(svc_num) - svc_up.return_value = "up" - - response = self.get_json('/mservices') - self.assertEqual(svc_num, len(response['mservices'])) - for i in range(svc_num): - elem = response['mservices'][i] - self.assertEqual(i + 1, elem['id']) - - -class TestMagnumServiceEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({rule: 'project:non_fake'}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertIn( - "Policy doesn't allow %s to be performed." % rule, - response.json['errors'][0]['detail']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - 'magnum-service:get_all', self.get_json, - '/mservices', expect_errors=True) diff --git a/magnum/tests/unit/api/controllers/v1/test_quota.py b/magnum/tests/unit/api/controllers/v1/test_quota.py deleted file mode 100644 index b3d776e7..00000000 --- a/magnum/tests/unit/api/controllers/v1/test_quota.py +++ /dev/null @@ -1,285 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from keystoneauth1 import exceptions as ka_exception - -from magnum.api.controllers.v1 import quota as api_quota -from magnum.common import clients -from magnum.tests import base -from magnum.tests.unit.api import base as api_base -from magnum.tests.unit.api import utils as apiutils -from magnum.tests.unit.objects import utils as obj_utils - - -class TestQuotaObject(base.TestCase): - def test_quota_init(self): - quota_dict = apiutils.quota_post_data() - del quota_dict['hard_limit'] - quota = api_quota.Quota(**quota_dict) - self.assertEqual(1, quota.hard_limit) - - -class TestQuota(api_base.FunctionalTest): - _quota_attrs = ("project_id", "resource", "hard_limit") - - def setUp(self): - super(TestQuota, self).setUp() - - def test_empty(self): - response = self.get_json('/quotas') - self.assertEqual([], response['quotas']) - - def test_one(self): - quota = obj_utils.create_test_quota(self.context) - response = self.get_json('/quotas') - self.assertEqual(quota.project_id, response['quotas'][0]["project_id"]) - self._verify_attrs(self._quota_attrs, response['quotas'][0]) - - def test_get_one(self): - quota = obj_utils.create_test_quota(self.context) - response = self.get_json('/quotas/%s/%s' % (quota['project_id'], - quota['resource'])) - self.assertEqual(quota.project_id, response['project_id']) - self.assertEqual(quota.resource, response['resource']) - - def test_get_one_not_found(self): - response = self.get_json( - '/quotas/fake_project/invalid_res', - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - def test_get_one_not_authorized(self): - obj_utils.create_test_quota(self.context) - response = self.get_json( - '/quotas/invalid_proj/invalid_res', - expect_errors=True) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) - - @mock.patch("magnum.common.policy.enforce") - @mock.patch("magnum.common.context.make_context") - def test_get_all_admin_all_tenants(self, mock_context, mock_policy): - mock_context.return_value = self.context - quota_list = [] - for i in range(4): - quota = obj_utils.create_test_quota(self.context, - project_id="proj-id-"+str(i)) - quota_list.append(quota) - - self.context.is_admin = True - response = self.get_json('/quotas?all_tenants=True') - self.assertEqual(4, len(response['quotas'])) - expected = [r.project_id for r in quota_list] - res_proj_ids = [r['project_id'] for r in response['quotas']] - self.assertEqual(sorted(expected), sorted(res_proj_ids)) - - @mock.patch("magnum.common.policy.enforce") - @mock.patch("magnum.common.context.make_context") - def test_get_all_with_non_admin_context(self, mock_context, mock_policy): - mock_context.return_value = self.context - quota_list = [] - for i in range(4): - quota = obj_utils.create_test_quota(self.context, - project_id="proj-id-"+str(i)) - quota_list.append(quota) - - self.context.is_admin = False - response = self.get_json('/quotas?all_tenants=True') - self.assertEqual(0, len(response['quotas'])) - - @mock.patch("magnum.common.policy.enforce") - @mock.patch("magnum.common.context.make_context") - def test_get_all_admin_not_all_tenants(self, mock_context, mock_policy): - mock_context.return_value = self.context - quota_list = [] - for i in range(4): - quota = 
obj_utils.create_test_quota(self.context, - project_id="proj-id-"+str(i)) - quota_list.append(quota) - - self.context.is_admin = True - self.context.project_id = 'proj-id-1' - response = self.get_json('/quotas') - self.assertEqual(1, len(response['quotas'])) - self.assertEqual('proj-id-1', response['quotas'][0]['project_id']) - - @mock.patch("magnum.common.policy.enforce") - @mock.patch("magnum.common.context.make_context") - def test_get_all_with_pagination_limit(self, mock_context, - mock_policy): - mock_context.return_value = self.context - quota_list = [] - for i in range(4): - quota = obj_utils.create_test_quota(self.context, - project_id="proj-id-"+str(i)) - quota_list.append(quota) - - self.context.is_admin = True - response = self.get_json('/quotas?limit=2&all_tenants=True') - self.assertEqual(2, len(response['quotas'])) - expected = [r.project_id for r in quota_list[:2]] - res_proj_ids = [r['project_id'] for r in response['quotas']] - self.assertEqual(sorted(expected), sorted(res_proj_ids)) - self.assertTrue('http://localhost/v1/quotas?' in response['next']) - self.assertTrue('sort_key=id' in response['next']) - self.assertTrue('sort_dir=asc' in response['next']) - self.assertTrue('limit=2' in response['next']) - self.assertTrue('marker=%s' % quota_list[1].id in response['next']) - - @mock.patch("magnum.common.policy.enforce") - @mock.patch("magnum.common.context.make_context") - def test_get_all_admin_all_with_pagination_marker(self, mock_context, - mock_policy): - mock_context.return_value = self.context - quota_list = [] - for i in range(4): - quota = obj_utils.create_test_quota(self.context, - project_id="proj-id-"+str(i)) - quota_list.append(quota) - - self.context.is_admin = True - response = self.get_json('/quotas?limit=3&marker=%s&all_tenants=True' - % quota_list[2].id) - self.assertEqual(1, len(response['quotas'])) - self.assertEqual(quota_list[-1].project_id, - response['quotas'][0]['project_id']) - - @mock.patch("magnum.common.policy.enforce") - @mock.patch("magnum.common.context.make_context") - def test_get_all_admin_all_tenants_false(self, mock_context, mock_policy): - mock_context.return_value = self.context - quota_list = [] - for i in range(4): - quota = obj_utils.create_test_quota(self.context, - project_id="proj-id-"+str(i)) - quota_list.append(quota) - - self.context.is_admin = True - self.context.project_id = 'proj-id-1' - response = self.get_json('/quotas?all_tenants=False') - self.assertEqual(1, len(response['quotas'])) - self.assertEqual('proj-id-1', response['quotas'][0]['project_id']) - - def test_get_all_non_admin(self): - quota_list = [] - for i in range(4): - quota = obj_utils.create_test_quota(self.context, - project_id="proj-id-"+str(i)) - quota_list.append(quota) - - headers = {'X-Project-Id': 'proj-id-2'} - response = self.get_json('/quotas', headers=headers) - self.assertEqual(1, len(response['quotas'])) - self.assertEqual('proj-id-2', response['quotas'][0]['project_id']) - - @mock.patch.object(clients.OpenStackClients, 'keystone') - def test_create_quota(self, mock_keystone): - quota_dict = apiutils.quota_post_data() - response = self.post_json('/quotas', quota_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(quota_dict['project_id'], response.json['project_id']) - - @mock.patch.object(clients.OpenStackClients, 'keystone') - def test_create_quota_project_id_not_found(self, mock_keystone): - keystone = mock.MagicMock() - exp = ka_exception.http.NotFound() - 
keystone.domain_admin_client.projects.get.side_effect = exp - mock_keystone.return_value = keystone - quota_dict = apiutils.quota_post_data() - response = self.post_json('/quotas', quota_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(404, response.status_int) - self.assertTrue(response.json['errors']) - - @mock.patch.object(clients.OpenStackClients, 'keystone') - def test_create_quota_invalid_resource(self, mock_keystone): - quota_dict = apiutils.quota_post_data() - quota_dict['resource'] = 'invalid-res' - response = self.post_json('/quotas', quota_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - @mock.patch.object(clients.OpenStackClients, 'keystone') - def test_create_quota_invalid_hard_limit(self, mock_keystone): - quota_dict = apiutils.quota_post_data() - quota_dict['hard_limit'] = -10 - response = self.post_json('/quotas', quota_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - @mock.patch.object(clients.OpenStackClients, 'keystone') - def test_create_quota_no_project_id(self, mock_keystone): - quota_dict = apiutils.quota_post_data() - del quota_dict['project_id'] - response = self.post_json('/quotas', quota_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['errors']) - - @mock.patch.object(clients.OpenStackClients, 'keystone') - def test_patch_quota(self, mock_keystone): - quota_dict = apiutils.quota_post_data(hard_limit=5) - response = self.post_json('/quotas', quota_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(quota_dict['project_id'], response.json['project_id']) - self.assertEqual(5, response.json['hard_limit']) - - quota_dict['hard_limit'] = 20 - response = self.patch_json('/quotas', quota_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(202, response.status_int) - self.assertEqual(20, response.json['hard_limit']) - - @mock.patch.object(clients.OpenStackClients, 'keystone') - def test_patch_quota_not_found(self, mock_keystone): - quota_dict = apiutils.quota_post_data() - response = self.post_json('/quotas', quota_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - - # update quota with non-existing project id - update_dict = {'project_id': 'not-found', - 'hard_limit': 20, - 'resource': 'Cluster'} - response = self.patch_json('/quotas', update_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(404, response.status_int) - self.assertTrue(response.json['errors']) - - @mock.patch.object(clients.OpenStackClients, 'keystone') - def test_delete_quota(self, mock_keystone): - quota_dict = apiutils.quota_post_data() - response = self.post_json('/quotas', quota_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - - project_id = quota_dict['project_id'] - resource = quota_dict['resource'] - # delete quota - self.delete('/quotas/%s/%s' % (project_id, resource)) - - # now check that quota does not exist - response = self.get_json( - '/quotas/%s/%s' % (project_id, 
resource), - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['errors']) diff --git a/magnum/tests/unit/api/controllers/v1/test_stats.py b/magnum/tests/unit/api/controllers/v1/test_stats.py deleted file mode 100644 index 1cb1b2e8..00000000 --- a/magnum/tests/unit/api/controllers/v1/test_stats.py +++ /dev/null @@ -1,130 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from webtest.app import AppError - -from magnum.tests.unit.api import base as api_base -from magnum.tests.unit.objects import utils as obj_utils - - -class TestStatsController(api_base.FunctionalTest): - - def setUp(self): - self.base_headers = {'OpenStack-API-Version': 'container-infra 1.4'} - super(TestStatsController, self).setUp() - obj_utils.create_test_cluster_template(self.context) - - def test_empty(self): - response = self.get_json('/stats', headers=self.base_headers) - expected = {u'clusters': 0, u'nodes': 0} - self.assertEqual(expected, response) - - @mock.patch("magnum.common.policy.enforce") - @mock.patch("magnum.common.context.make_context") - def test_admin_get_all_stats(self, mock_context, mock_policy): - obj_utils.create_test_cluster(self.context, - project_id=123, - uuid='uuid1') - obj_utils.create_test_cluster(self.context, - project_id=234, - uuid='uuid2') - response = self.get_json('/stats', headers=self.base_headers) - expected = {u'clusters': 2, u'nodes': 12} - self.assertEqual(expected, response) - - @mock.patch("magnum.common.policy.enforce") - @mock.patch("magnum.common.context.make_context") - def test_admin_get_tenant_stats(self, mock_context, mock_policy): - obj_utils.create_test_cluster(self.context, - project_id=123, - uuid='uuid1') - obj_utils.create_test_cluster(self.context, - project_id=234, - uuid='uuid2') - self.context.is_admin = True - response = self.get_json('/stats?project_id=234', - headers=self.base_headers) - expected = {u'clusters': 1, u'nodes': 6} - self.assertEqual(expected, response) - - @mock.patch("magnum.common.policy.enforce") - @mock.patch("magnum.common.context.make_context") - def test_admin_get_invalid_tenant_stats(self, mock_context, mock_policy): - obj_utils.create_test_cluster(self.context, - project_id=123, - uuid='uuid1') - obj_utils.create_test_cluster(self.context, - project_id=234, - uuid='uuid2') - self.context.is_admin = True - response = self.get_json('/stats?project_id=34', - headers=self.base_headers) - expected = {u'clusters': 0, u'nodes': 0} - self.assertEqual(expected, response) - - def test_get_self_stats(self): - obj_utils.create_test_cluster(self.context, - project_id=123, - uuid='uuid1') - obj_utils.create_test_cluster(self.context, - project_id=234, - uuid='uuid2', - node_count=5, - master_count=1) - headers = self.base_headers.copy() - headers['X-Project-Id'] = '234' - response = self.get_json('/stats', - headers=headers) - expected = {u'clusters': 1, u'nodes': 6} - self.assertEqual(expected, response) - - def 
test_get_self_stats_without_param(self): - obj_utils.create_test_cluster(self.context, - project_id=123, - uuid='uuid1') - obj_utils.create_test_cluster(self.context, - project_id=234, - uuid='uuid2', - node_count=5, - master_count=1) - headers = self.base_headers.copy() - headers['X-Project-Id'] = '234' - response = self.get_json('/stats', - headers=headers) - expected = {u'clusters': 1, u'nodes': 6} - self.assertEqual(expected, response) - - def test_get_some_other_user_stats(self): - obj_utils.create_test_cluster(self.context, - project_id=123, - uuid='uuid1') - obj_utils.create_test_cluster(self.context, - project_id=234, - uuid='uuid2', - node_count=5) - headers = self.base_headers.copy() - headers['X-Project-Id'] = '234' - self.assertRaises(AppError, - self.get_json, - '/stats?project_id=123', - headers=headers) - - def test_get_invalid_type_stats(self): - obj_utils.create_test_cluster(self.context, - project_id=123, - uuid='uuid1') - self.assertRaises(AppError, - self.get_json, - '/stats?project_id=123&type=invalid', - headers=self.base_headers) diff --git a/magnum/tests/unit/api/controllers/v1/test_types.py b/magnum/tests/unit/api/controllers/v1/test_types.py deleted file mode 100644 index 3cfadbd2..00000000 --- a/magnum/tests/unit/api/controllers/v1/test_types.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
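The test_types.py removal that follows pins down the behaviour of Magnum's custom WSME validators (MacAddressType, UuidType, JsonPatchType, MultiType, BooleanType, NameType). As a rough, self-contained sketch of the contract the MultiType cases assert -- the class name below is hypothetical, not Magnum's actual implementation, and the real type also has to dispatch to wsme user types that carry their own validate(), which this glosses over:

class MultiTypeSketch(object):
    """Accept a value matching any wrapped type; raise ValueError otherwise."""

    def __init__(self, *types):
        # An entry may be a single type or a tuple of types (the deleted
        # tests pass six.integer_types, which is a tuple on Python 2).
        self.types = types

    def validate(self, value):
        for candidate in self.types:
            allowed = candidate if isinstance(candidate, tuple) else (candidate,)
            if isinstance(value, allowed):
                return value
        raise ValueError("Wrong type. Expected '%s', got '%s'"
                         % (self.types, type(value)))


# Mirrors TestMultiType.test_valid_values / test_invalid_values below:
vt = MultiTypeSketch(str, int)
assert vt.validate("hello") == "hello"
assert vt.validate(10) == 10
try:
    vt.validate(0.10)  # no wrapped type accepts a float -> ValueError
except ValueError:
    pass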
-from oslo_utils import uuidutils - -import mock -import six -import webtest -import wsme -from wsme import types as wtypes - -from magnum.api.controllers.v1 import types -from magnum.common import exception -from magnum.common import utils -from magnum.tests.unit.api import base - - -class TestMacAddressType(base.FunctionalTest): - - def test_valid_mac_addr(self): - test_mac = 'aa:bb:cc:11:22:33' - with mock.patch.object(utils, 'validate_and_normalize_mac') as m_mock: - types.MacAddressType.validate(test_mac) - m_mock.assert_called_once_with(test_mac) - - def test_invalid_mac_addr(self): - self.assertRaises(exception.InvalidMAC, - types.MacAddressType.validate, 'invalid-mac') - - def test_frombasetype(self): - test_mac = 'aa:bb:cc:11:22:33' - with mock.patch.object(utils, 'validate_and_normalize_mac') as m_mock: - types.MacAddressType.frombasetype(test_mac) - m_mock.assert_called_once_with(test_mac) - - def test_frombasetype_no_value(self): - test_mac = None - self.assertIsNone(types.MacAddressType.frombasetype(test_mac)) - - -class TestUuidType(base.FunctionalTest): - - def test_valid_uuid(self): - test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' - with mock.patch.object(uuidutils, 'is_uuid_like') as uuid_mock: - types.UuidType.validate(test_uuid) - uuid_mock.assert_called_once_with(test_uuid) - - def test_invalid_uuid(self): - self.assertRaises(exception.InvalidUUID, - types.UuidType.validate, 'invalid-uuid') - - -class MyBaseType(object): - """Helper class, patched by objects of type MyPatchType""" - mandatory = wsme.wsattr(wtypes.text, mandatory=True) - - -class MyPatchType(types.JsonPatchType): - """Helper class for TestJsonPatchType tests.""" - _api_base = MyBaseType - _extra_non_removable_attrs = {'/non_removable'} - - @staticmethod - def internal_attrs(): - return ['/internal'] - - -class MyRoot(wsme.WSRoot): - """Helper class for TestJsonPatchType tests.""" - - @wsme.expose([wsme.types.text], body=[MyPatchType]) - @wsme.validate([MyPatchType]) - def test(self, patch): - return patch - - -class TestJsonPatchType(base.FunctionalTest): - - def setUp(self): - super(TestJsonPatchType, self).setUp() - self.app = webtest.TestApp(MyRoot(['restjson']).wsgiapp()) - - def _patch_json(self, params, expect_errors=False): - return self.app.patch_json( - '/test', params=params, - headers={'Accept': 'application/json'}, - expect_errors=expect_errors) - - def test_valid_patches(self): - valid_patches = [{'path': '/extra/foo', 'op': 'remove'}, - {'path': '/extra/foo', 'op': 'add', 'value': 'bar'}, - {'path': '/foo', 'op': 'replace', 'value': 'bar'}] - ret = self._patch_json(valid_patches, False) - self.assertEqual(200, ret.status_int) - self.assertEqual(sorted(valid_patches, key=lambda k: k['op']), - sorted(ret.json, key=lambda k: k['op'])) - - def test_cannot_update_internal_attr(self): - patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - - def test_cannot_remove_internal_attr(self): - patch = [{'path': '/internal', 'op': 'remove'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - - def test_cannot_add_internal_attr(self): - patch = [{'path': '/internal', 'op': 'add', 'value': 'foo'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - - def test_update_mandatory_attr(self): - patch = [{'path': '/mandatory', 'op': 'replace', 'value': 'foo'}] - ret = self._patch_json(patch, False) - self.assertEqual(200, ret.status_int) - self.assertEqual(patch, 
ret.json) - - def test_cannot_remove_mandatory_attr(self): - patch = [{'path': '/mandatory', 'op': 'remove'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - - def test_cannot_remove_extra_non_removable_attr(self): - patch = [{'path': '/non_removable', 'op': 'remove'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_missing_required_fields_path(self): - missing_path = [{'op': 'remove'}] - ret = self._patch_json(missing_path, True) - self.assertEqual(400, ret.status_int) - - def test_missing_required_fields_op(self): - missing_op = [{'path': '/foo'}] - ret = self._patch_json(missing_op, True) - self.assertEqual(400, ret.status_int) - - def test_invalid_op(self): - patch = [{'path': '/foo', 'op': 'invalid'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - - def test_invalid_path(self): - patch = [{'path': 'invalid-path', 'op': 'remove'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - - def test_cannot_add_with_no_value(self): - patch = [{'path': '/extra/foo', 'op': 'add'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - - def test_cannot_replace_with_no_value(self): - patch = [{'path': '/foo', 'op': 'replace'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - - -class TestMultiType(base.FunctionalTest): - - def test_valid_values(self): - vt = types.MultiType(wsme.types.text, six.integer_types) - value = vt.validate("hello") - self.assertEqual("hello", value) - value = vt.validate(10) - self.assertEqual(10, value) - - vt = types.MultiType(types.UuidType, types.NameType) - value = vt.validate('name') - self.assertEqual('name', value) - uuid = "437319e3-d10f-49ec-84c8-e4abb6118c29" - value = vt.validate(uuid) - self.assertEqual(uuid, value) - - vt = types.MultiType(types.UuidType, six.integer_types) - value = vt.validate(10) - self.assertEqual(10, value) - value = vt.validate(uuid) - self.assertEqual(uuid, value) - - def test_invalid_values(self): - vt = types.MultiType(wsme.types.text, six.integer_types) - self.assertRaises(ValueError, vt.validate, 0.10) - self.assertRaises(ValueError, vt.validate, object()) - - vt = types.MultiType(types.UuidType, six.integer_types) - self.assertRaises(ValueError, vt.validate, 'abc') - self.assertRaises(ValueError, vt.validate, 0.10) - - def test_multitype_tostring(self): - vt = types.MultiType(str, int) - vts = str(vt) - self.assertIn(str(str), vts) - self.assertIn(str(int), vts) - - -class TestBooleanType(base.FunctionalTest): - - def test_valid_true_values(self): - v = types.BooleanType() - self.assertTrue(v.validate("true")) - self.assertTrue(v.validate("TRUE")) - self.assertTrue(v.validate("True")) - self.assertTrue(v.validate("t")) - self.assertTrue(v.validate("1")) - self.assertTrue(v.validate("y")) - self.assertTrue(v.validate("yes")) - self.assertTrue(v.validate("on")) - - def test_valid_false_values(self): - v = types.BooleanType() - self.assertFalse(v.validate("false")) - self.assertFalse(v.validate("FALSE")) - self.assertFalse(v.validate("False")) - self.assertFalse(v.validate("f")) - self.assertFalse(v.validate("0")) - self.assertFalse(v.validate("n")) - self.assertFalse(v.validate("no")) - self.assertFalse(v.validate("off")) - - def test_invalid_value(self): - v = types.BooleanType() - self.assertRaises(exception.Invalid, v.validate, "invalid-value") - self.assertRaises(exception.Invalid, v.validate, 
"01") - - def test_frombasetype_no_value(self): - v = types.BooleanType() - self.assertIsNone(v.frombasetype(None)) - - -class TestNameType(base.FunctionalTest): - - def test_valid_name(self): - self.assertEqual('name', types.NameType.validate('name')) - self.assertEqual(1234, types.NameType.validate(1234)) - - def test_invalid_name(self): - self.assertRaises(exception.InvalidName, types.NameType.validate, None) - self.assertRaises(exception.InvalidName, types.NameType.validate, '') - - def test_frombasetype_no_value(self): - self.assertEqual('name', types.NameType.frombasetype('name')) - self.assertEqual(1234, types.NameType.frombasetype(1234)) - - def test_frombasetype(self): - self.assertIsNone(types.NameType.frombasetype(None)) diff --git a/magnum/tests/unit/api/controllers/v1/test_utils.py b/magnum/tests/unit/api/controllers/v1/test_utils.py deleted file mode 100644 index 6d670e99..00000000 --- a/magnum/tests/unit/api/controllers/v1/test_utils.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import jsonpatch -import mock -from oslo_utils import uuidutils -import wsme - -from magnum.api import utils -from magnum.common import exception -import magnum.conf -from magnum.tests.unit.api import base - - -CONF = magnum.conf.CONF - - -class TestApiUtils(base.FunctionalTest): - - def test_validate_limit(self): - limit = utils.validate_limit(10) - self.assertEqual(10, 10) - - # max limit - limit = utils.validate_limit(999999999) - self.assertEqual(CONF.api.max_limit, limit) - - # negative - self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, -1) - - # zero - self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, 0) - - def test_validate_sort_dir(self): - sort_dir = utils.validate_sort_dir('asc') - self.assertEqual('asc', sort_dir) - - # invalid sort_dir parameter - self.assertRaises(wsme.exc.ClientSideError, - utils.validate_sort_dir, - 'fake-sort') - - @mock.patch('pecan.request') - @mock.patch('magnum.objects.Cluster.get_by_name') - @mock.patch('magnum.objects.Cluster.get_by_uuid') - def test_get_resource_with_uuid( - self, - mock_get_by_uuid, - mock_get_by_name, - mock_request): - mock_cluster = mock.MagicMock - mock_get_by_uuid.return_value = mock_cluster - uuid = uuidutils.generate_uuid() - - returned_cluster = utils.get_resource('Cluster', uuid) - - mock_get_by_uuid.assert_called_once_with(mock_request.context, uuid) - self.assertFalse(mock_get_by_name.called) - self.assertEqual(mock_cluster, returned_cluster) - - @mock.patch('pecan.request') - @mock.patch('magnum.objects.Cluster.get_by_name') - @mock.patch('magnum.objects.Cluster.get_by_uuid') - def test_get_resource_with_name( - self, - mock_get_by_uuid, - mock_get_by_name, - mock_request): - mock_cluster = mock.MagicMock - mock_get_by_name.return_value = mock_cluster - - returned_cluster = utils.get_resource('Cluster', 'fake-name') - - self.assertFalse(mock_get_by_uuid.called) - 
mock_get_by_name.assert_called_once_with(mock_request.context, - 'fake-name') - self.assertEqual(mock_cluster, returned_cluster) - - @mock.patch.object(uuidutils, 'is_uuid_like', return_value=True) - def test_get_openstack_resource_by_uuid(self, fake_is_uuid_like): - fake_manager = mock.MagicMock() - fake_manager.get.return_value = 'fake_resource_data' - resource_data = utils.get_openstack_resource(fake_manager, - 'fake_resource', - 'fake_resource_type') - self.assertEqual('fake_resource_data', resource_data) - - @mock.patch.object(uuidutils, 'is_uuid_like', return_value=False) - def test_get_openstack_resource_by_name(self, fake_is_uuid_like): - fake_manager = mock.MagicMock() - fake_manager.list.return_value = ['fake_resource_data'] - resource_data = utils.get_openstack_resource(fake_manager, - 'fake_resource', - 'fake_resource_type') - self.assertEqual('fake_resource_data', resource_data) - - @mock.patch.object(uuidutils, 'is_uuid_like', return_value=False) - def test_get_openstack_resource_non_exist(self, fake_is_uuid_like): - fake_manager = mock.MagicMock() - fake_manager.list.return_value = [] - self.assertRaises(exception.ResourceNotFound, - utils.get_openstack_resource, - fake_manager, 'fake_resource', 'fake_resource_type') - - @mock.patch.object(uuidutils, 'is_uuid_like', return_value=False) - def test_get_openstack_resource_multi_exist(self, fake_is_uuid_like): - fake_manager = mock.MagicMock() - fake_manager.list.return_value = ['fake_resource_data1', - 'fake_resource_data2'] - self.assertRaises(exception.Conflict, - utils.get_openstack_resource, - fake_manager, 'fake_resource', 'fake_resource_type') - - @mock.patch.object(jsonpatch, 'apply_patch') - def test_apply_jsonpatch(self, mock_jsonpatch): - doc = {'cluster_uuid': 'id', 'node_count': 1} - patch = [{"path": "/node_count", "value": 2, "op": "replace"}] - utils.apply_jsonpatch(doc, patch) - mock_jsonpatch.assert_called_once_with(doc, patch) - - def test_apply_jsonpatch_add_attr_not_exist(self): - doc = {'cluster_uuid': 'id', 'node_count': 1} - patch = [{"path": "/fake", "value": 2, "op": "add"}] - exc = self.assertRaises(wsme.exc.ClientSideError, - utils.apply_jsonpatch, - doc, patch) - self.assertEqual( - "Adding a new attribute /fake to the root of the resource is " - "not allowed.", exc.faultstring) - - def test_apply_jsonpatch_add_attr_already_exist(self): - doc = {'cluster_uuid': 'id', 'node_count': 1} - patch = [{"path": "/node_count", "value": 2, "op": "add"}] - exc = self.assertRaises(wsme.exc.ClientSideError, - utils.apply_jsonpatch, - doc, patch) - - self.assertEqual( - "The attribute /node_count has existed, please use " - "'replace' operation instead.", exc.faultstring) - - def test_validate_docker_memory(self): - utils.validate_docker_memory('512m') - utils.validate_docker_memory('512g') - self.assertRaises(wsme.exc.ClientSideError, - utils.validate_docker_memory, "512gg") - # Docker require that Minimum memory limit >= 4M - self.assertRaises(wsme.exc.ClientSideError, - utils.validate_docker_memory, "3m") diff --git a/magnum/tests/unit/api/test_app.py b/magnum/tests/unit/api/test_app.py deleted file mode 100644 index 3b6f7abf..00000000 --- a/magnum/tests/unit/api/test_app.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. 
You may obtain a copy
-# of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from magnum.api import app as api_app
-from magnum.api import config as api_config
-from magnum.api import hooks
-from magnum.tests import base
-
-
-class TestAppConfig(base.BaseTestCase):
-
-    def test_get_pecan_config(self):
-        config = api_app.get_pecan_config()
-
-        config_d = dict(config.app)
-
-        self.assertEqual(api_config.app['modules'], config_d['modules'])
-        self.assertEqual(api_config.app['root'], config_d['root'])
-        self.assertIsInstance(config_d['hooks'][0], hooks.ContextHook)
diff --git a/magnum/tests/unit/api/test_attr_validator.py b/magnum/tests/unit/api/test_attr_validator.py
deleted file mode 100644
index d6802682..00000000
--- a/magnum/tests/unit/api/test_attr_validator.py
+++ /dev/null
@@ -1,328 +0,0 @@
-# Copyright 2015 EasyStack, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from glanceclient import exc as glance_exception
-import mock
-from novaclient import exceptions as nova_exc
-
-from magnum.api import attr_validator
-from magnum.common import exception
-from magnum.tests import base
-
-
-class TestAttrValidator(base.BaseTestCase):
-
-    def test_validate_flavor_with_valid_flavor(self):
-        mock_flavor = mock.MagicMock()
-        mock_flavor.name = 'test_flavor'
-        mock_flavor.id = 'test_flavor_id'
-        mock_flavors = [mock_flavor]
-        mock_nova = mock.MagicMock()
-        mock_nova.flavors.list.return_value = mock_flavors
-        mock_os_cli = mock.MagicMock()
-        mock_os_cli.nova.return_value = mock_nova
-        attr_validator.validate_flavor(mock_os_cli, 'test_flavor')
-        self.assertTrue(mock_nova.flavors.list.called)
-
-    def test_validate_flavor_with_none_flavor(self):
-        mock_flavor = mock.MagicMock()
-        mock_flavor.name = 'test_flavor'
-        mock_flavor.id = 'test_flavor_id'
-        mock_flavors = [mock_flavor]
-        mock_nova = mock.MagicMock()
-        mock_nova.flavors.list.return_value = mock_flavors
-        mock_os_cli = mock.MagicMock()
-        mock_os_cli.nova.return_value = mock_nova
-        attr_validator.validate_flavor(mock_os_cli, None)
-        self.assertFalse(mock_nova.flavors.list.called)
-
-    def test_validate_flavor_with_invalid_flavor(self):
-        mock_flavor = mock.MagicMock()
-        mock_flavor.name = 'test_flavor_not_equal'
-        mock_flavor.id = 'test_flavor_id_not_equal'
-        mock_flavors = [mock_flavor]
-        mock_nova = mock.MagicMock()
-        mock_nova.flavors.list.return_value = mock_flavors
-        mock_os_cli = mock.MagicMock()
-        mock_os_cli.nova.return_value = mock_nova
-        self.assertRaises(exception.FlavorNotFound,
-                          attr_validator.validate_flavor,
-                          mock_os_cli, 'test_flavor')
-
-    def test_validate_external_network_with_valid_network(self):
-        mock_networks = {'networks': [{'name': 'test_ext_net',
-                                       'id':
'test_ext_net_id'}]} - mock_neutron = mock.MagicMock() - mock_neutron.list_networks.return_value = mock_networks - mock_os_cli = mock.MagicMock() - mock_os_cli.neutron.return_value = mock_neutron - attr_validator.validate_external_network(mock_os_cli, 'test_ext_net') - self.assertTrue(mock_neutron.list_networks.called) - - def test_validate_external_network_with_multiple_valid_network(self): - mock_networks = {'networks': - [{'name': 'test_ext_net', 'id': 'test_ext_net_id1'}, - {'name': 'test_ext_net', 'id': 'test_ext_net_id2'}]} - mock_neutron = mock.MagicMock() - mock_neutron.list_networks.return_value = mock_networks - mock_os_cli = mock.MagicMock() - mock_os_cli.neutron.return_value = mock_neutron - self.assertRaises(exception.Conflict, - attr_validator.validate_external_network, - mock_os_cli, 'test_ext_net') - - def test_validate_external_network_with_invalid_network(self): - mock_networks = {'networks': [{'name': 'test_ext_net_not_equal', - 'id': 'test_ext_net_id_not_equal'}]} - mock_neutron = mock.MagicMock() - mock_neutron.list_networks.return_value = mock_networks - mock_os_cli = mock.MagicMock() - mock_os_cli.neutron.return_value = mock_neutron - self.assertRaises(exception.ExternalNetworkNotFound, - attr_validator.validate_external_network, - mock_os_cli, 'test_ext_net') - - def test_validate_keypair_with_valid_keypair(self): - mock_keypair = mock.MagicMock() - mock_keypair.id = 'test-keypair' - mock_nova = mock.MagicMock() - mock_nova.keypairs.get.return_value = mock_keypair - mock_os_cli = mock.MagicMock() - mock_os_cli.nova.return_value = mock_nova - attr_validator.validate_keypair(mock_os_cli, 'test-keypair') - - def test_validate_keypair_with_invalid_keypair(self): - mock_nova = mock.MagicMock() - mock_nova.keypairs.get.side_effect = nova_exc.NotFound('test-keypair') - mock_os_cli = mock.MagicMock() - mock_os_cli.nova.return_value = mock_nova - self.assertRaises(exception.KeyPairNotFound, - attr_validator.validate_keypair, - mock_os_cli, 'test_keypair') - - def test_validate_labels_main_no_label(self): - fake_labels = {} - attr_validator.validate_labels(fake_labels) - - def test_validate_labels_main_isolation_invalid_label(self): - fake_labels = {'mesos_slave_isolation': 'abc'} - self.assertRaises(exception.InvalidParameterValue, - attr_validator.validate_labels, - fake_labels) - - def test_validate_labels_isolation_valid(self): - fake_labels = {'mesos_slave_isolation': - 'filesystem/posix,filesystem/linux'} - attr_validator.validate_labels_isolation(fake_labels) - - def test_validate_labels_main_with_valid_providers_none_isolation(self): - fake_labels = {'mesos_slave_image_providers': 'docker'} - self.assertRaises(exception.RequiredParameterNotProvided, - attr_validator.validate_labels, - fake_labels) - - def test_validate_labels_with_valid_providers_invalid_isolation(self): - fake_labels = {'mesos_slave_image_providers': 'docker', - 'mesos_slave_isolation': 'abc'} - self.assertRaises(exception.RequiredParameterNotProvided, - attr_validator.validate_labels_image_providers, - fake_labels) - - def test_validate_labels_with_valid_providers_invalid_providers(self): - fake_labels = {'mesos_slave_image_providers': 'appc'} - attr_validator.validate_labels_image_providers(fake_labels) - - def test_validate_labels_with_invalid_providers(self): - fake_labels = {'mesos_slave_image_providers': 'abc'} - self.assertRaises(exception.InvalidParameterValue, - attr_validator.validate_labels_image_providers, - fake_labels) - - def 
test_validate_labels_with_valid_providers_none_isolation(self):
-        fake_labels = {'mesos_slave_image_providers': 'docker'}
-        self.assertRaises(exception.RequiredParameterNotProvided,
-                          attr_validator.validate_labels_image_providers,
-                          fake_labels)
-
-    def test_validate_labels_with_valid_providers_valid_isolation(self):
-        fake_labels = {'mesos_slave_image_providers': 'docker',
-                       'mesos_slave_isolation': 'docker/runtime'}
-        attr_validator.validate_labels_image_providers(fake_labels)
-
-    def test_validate_labels_with_environment_variables_valid_json(self):
-        contents = '{"step": "upgrade", "interface": "deploy"}'
-        fake_labels = {'mesos_slave_executor_env_variables': contents}
-        attr_validator.validate_labels_executor_env_variables(
-            fake_labels)
-
-    def test_validate_labels_with_environment_variables_bad_json(self):
-        fake_labels = {'mesos_slave_executor_env_variables': 'step'}
-        self.assertRaisesRegex(
-            exception.InvalidParameterValue,
-            "Json format error",
-            attr_validator.validate_labels_executor_env_variables,
-            fake_labels)
-
-    def test_validate_labels_with_valid_isolation(self):
-        fake_labels = {'mesos_slave_isolation':
-                       'filesystem/posix,filesystem/linux'}
-        attr_validator.validate_labels_isolation(fake_labels)
-
-    def test_validate_labels_isolation_invalid(self):
-        fake_labels = {'mesos_slave_isolation': 'filesystem'}
-        self.assertRaises(exception.InvalidParameterValue,
-                          attr_validator.validate_labels_isolation,
-                          fake_labels)
-
-    def test_validate_labels_strategy_valid(self):
-        fake_labels = {'swarm_strategy': 'spread'}
-        attr_validator.validate_labels_strategy(fake_labels)
-
-    def test_validate_labels_strategy_missing(self):
-        fake_labels = {'strategy': 'spread'}
-        attr_validator.validate_labels_strategy(fake_labels)
-
-    def test_validate_labels_strategy_invalid(self):
-        fake_labels = {'swarm_strategy': 'invalid'}
-        self.assertRaises(exception.InvalidParameterValue,
-                          attr_validator.validate_labels_strategy,
-                          fake_labels)
-
-    @mock.patch('magnum.api.utils.get_openstack_resource')
-    def test_validate_image_with_valid_image_by_name(self, mock_os_res):
-        mock_image = {'name': 'fedora-21-atomic-5',
-                      'id': 'e33f0988-1730-405e-8401-30cbc8535302',
-                      'os_distro': 'fedora-atomic'}
-        mock_os_res.return_value = mock_image
-        mock_os_cli = mock.MagicMock()
-        attr_validator.validate_image(mock_os_cli, 'fedora-21-atomic-5')
-        self.assertTrue(mock_os_res.called)
-
-    @mock.patch('magnum.api.utils.get_openstack_resource')
-    def test_validate_image_with_forbidden_image(self, mock_os_res):
-        def glance_side_effect(cli, image, name):
-            raise glance_exception.HTTPForbidden()
-
-        mock_os_res.side_effect = glance_side_effect
-        mock_os_cli = mock.MagicMock()
-        self.assertRaises(exception.ImageNotAuthorized,
-                          attr_validator.validate_image, mock_os_cli,
-                          'fedora-21-atomic-5')
-
-    @mock.patch('magnum.api.utils.get_openstack_resource')
-    def test_validate_image_with_valid_image_by_id(self, mock_os_res):
-        mock_image = {'name': 'fedora-21-atomic-5',
-                      'id': 'e33f0988-1730-405e-8401-30cbc8535302',
-                      'os_distro': 'fedora-atomic'}
-        mock_os_res.return_value = mock_image
-        mock_os_cli = mock.MagicMock()
-        attr_validator.validate_image(mock_os_cli,
-                                      'e33f0988-1730-405e-8401-30cbc8535302')
-        self.assertTrue(mock_os_res.called)
-
-    @mock.patch('magnum.api.utils.get_openstack_resource')
-    def test_validate_image_with_nonexist_image_by_name(self, mock_os_res):
-        mock_os_res.side_effect = exception.ResourceNotFound
-        mock_os_cli = mock.MagicMock()
-        self.assertRaises(exception.ImageNotFound,
-
attr_validator.validate_image, - mock_os_cli, 'fedora-21-atomic-5') - - @mock.patch('magnum.api.utils.get_openstack_resource') - def test_validate_image_with_nonexist_image_by_id(self, mock_os_res): - mock_os_res.side_effect = glance_exception.NotFound - mock_os_cli = mock.MagicMock() - self.assertRaises(exception.ImageNotFound, - attr_validator.validate_image, - mock_os_cli, 'fedora-21-atomic-5') - - @mock.patch('magnum.api.utils.get_openstack_resource') - def test_validate_image_with_multi_images_same_name(self, mock_os_res): - mock_os_res.side_effect = exception.Conflict - mock_os_cli = mock.MagicMock() - self.assertRaises(exception.Conflict, - attr_validator.validate_image, - mock_os_cli, 'fedora-21-atomic-5') - - @mock.patch('magnum.api.utils.get_openstack_resource') - def test_validate_image_without_os_distro(self, mock_os_res): - mock_image = {'name': 'fedora-21-atomic-5', - 'id': 'e33f0988-1730-405e-8401-30cbc8535302'} - mock_os_res.return_value = mock_image - mock_os_cli = mock.MagicMock() - self.assertRaises(exception.OSDistroFieldNotFound, - attr_validator.validate_image, - mock_os_cli, 'fedora-21-atomic-5') - - @mock.patch('magnum.api.utils.get_openstack_resource') - def test_validate_image_when_user_forbidden(self, mock_os_res): - mock_image = {'name': 'fedora-21-atomic-5', - 'id': 'e33f0988-1730-405e-8401-30cbc8535302', - 'os_distro': ''} - mock_os_res.return_value = mock_image - mock_os_cli = mock.MagicMock() - self.assertRaises(exception.OSDistroFieldNotFound, - attr_validator.validate_image, - mock_os_cli, 'fedora-21-atomic-5') - - @mock.patch('magnum.common.clients.OpenStackClients') - def test_validate_os_resources_with_invalid_flavor(self, - mock_os_cli): - mock_cluster_template = {'flavor_id': 'test_flavor'} - mock_flavor = mock.MagicMock() - mock_flavor.name = 'test_flavor_not_equal' - mock_flavor.id = 'test_flavor_id_not_equal' - mock_flavors = [mock_flavor] - mock_nova = mock.MagicMock() - mock_nova.flavors.list.return_value = mock_flavors - mock_os_cli.nova.return_value = mock_nova - mock_context = mock.MagicMock() - self.assertRaises(exception.FlavorNotFound, - attr_validator.validate_os_resources, - mock_context, mock_cluster_template) - - @mock.patch('magnum.common.clients.OpenStackClients') - @mock.patch('magnum.api.attr_validator.validate_labels') - def test_validate_os_resources_with_label(self, mock_validate_labels, - mock_os_cli): - mock_cluster_template = {'labels': {'mesos_slave_isolation': 'abc'}} - mock_context = mock.MagicMock() - self.assertRaises(exception.InvalidParameterValue, - attr_validator.validate_os_resources, mock_context, - mock_cluster_template) - - @mock.patch('magnum.common.clients.OpenStackClients') - @mock.patch('magnum.api.attr_validator.validators') - def test_validate_os_resources_without_validator(self, mock_validators, - mock_os_cli): - mock_cluster_template = {} - mock_context = mock.MagicMock() - attr_validator.validate_os_resources(mock_context, - mock_cluster_template) - - @mock.patch('magnum.common.clients.OpenStackClients') - def test_validate_os_resources_with_cluster(self, mock_os_cli): - mock_cluster_template = {} - mock_cluster = {'keypair': 'test-keypair'} - mock_keypair = mock.MagicMock() - mock_keypair.id = 'test-keypair' - mock_nova = mock.MagicMock() - mock_nova.keypairs.get.return_value = mock_keypair - mock_os_cli = mock.MagicMock() - mock_os_cli.nova.return_value = mock_nova - mock_context = mock.MagicMock() - attr_validator.validate_os_resources(mock_context, - mock_cluster_template, - mock_cluster) diff --git 
a/magnum/tests/unit/api/test_expose.py b/magnum/tests/unit/api/test_expose.py deleted file mode 100644 index 54caf93c..00000000 --- a/magnum/tests/unit/api/test_expose.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from magnum.api import expose -from magnum.tests import base - - -class TestExpose(base.BaseTestCase): - - @mock.patch('wsmeext.pecan.wsexpose') - def test_expose_with_rest_content_types(self, mock_pecan): - self.assertTrue(expose.expose(rest_content_types='json')) - mock_pecan.assert_called_with(rest_content_types='json') - - @mock.patch('wsmeext.pecan.wsexpose') - def test_expose_without_rest_content_types(self, mock_pecan): - self.assertTrue(expose.expose()) - mock_pecan.assert_called_once_with(rest_content_types=('json',)) diff --git a/magnum/tests/unit/api/test_hooks.py b/magnum/tests/unit/api/test_hooks.py deleted file mode 100644 index f7b9a9aa..00000000 --- a/magnum/tests/unit/api/test_hooks.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2014 -# The Cloudscaling Group, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-import six
-
-import mock
-from oslo_config import cfg
-import oslo_messaging as messaging
-
-from magnum.api.controllers import root
-from magnum.api import hooks
-from magnum.common import context as magnum_context
-from magnum.tests import base
-from magnum.tests import fakes
-from magnum.tests.unit.api import base as api_base
-
-
-class TestContextHook(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestContextHook, self).setUp()
-        self.app = fakes.FakeApp()
-
-    def test_context_hook_before_method(self):
-        state = mock.Mock(request=fakes.FakePecanRequest())
-        hook = hooks.ContextHook()
-        hook.before(state)
-        ctx = state.request.context
-        self.assertIsInstance(ctx, magnum_context.RequestContext)
-        self.assertEqual(fakes.fakeAuthTokenHeaders['X-Auth-Token'],
-                         ctx.auth_token)
-        self.assertEqual(fakes.fakeAuthTokenHeaders['X-Project-Id'],
-                         ctx.project_id)
-        self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Name'],
-                         ctx.user_name)
-        self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Id'],
-                         ctx.user_id)
-        self.assertEqual(fakes.fakeAuthTokenHeaders['X-Roles'],
-                         ','.join(ctx.roles))
-        self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Domain-Name'],
-                         ctx.domain_name)
-        self.assertEqual(fakes.fakeAuthTokenHeaders['X-User-Domain-Id'],
-                         ctx.domain_id)
-        self.assertIsNone(ctx.auth_token_info)
-
-    def test_context_hook_before_method_auth_info(self):
-        state = mock.Mock(request=fakes.FakePecanRequest())
-        state.request.environ['keystone.token_info'] = 'assert_this'
-        hook = hooks.ContextHook()
-        hook.before(state)
-        ctx = state.request.context
-        self.assertIsInstance(ctx, magnum_context.RequestContext)
-        self.assertEqual(fakes.fakeAuthTokenHeaders['X-Auth-Token'],
-                         ctx.auth_token)
-        self.assertEqual('assert_this', ctx.auth_token_info)
-
-
-class TestNoExceptionTracebackHook(api_base.FunctionalTest):
-
-    TRACE = [u'Traceback (most recent call last):',
-             u' File "/opt/stack/magnum/magnum/openstack/common/rpc/amqp.py",'
-             ' line 434, in _process_data\\n **args)',
-             u' File "/opt/stack/magnum/magnum/openstack/common/rpc/'
-             'dispatcher.py", line 172, in dispatch\\n result ='
-             ' getattr(proxyobj, method)(context, **kwargs)']
-    MSG_WITHOUT_TRACE = "Test exception message."
-    MSG_WITH_TRACE = MSG_WITHOUT_TRACE + "\n" + "\n".join(TRACE)
-
-    def setUp(self):
-        super(TestNoExceptionTracebackHook, self).setUp()
-        p = mock.patch.object(root.Root, 'convert')
-        self.root_convert_mock = p.start()
-        self.addCleanup(p.stop)
-
-    def test_hook_exception_success(self):
-        self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE)
-
-        response = self.get_json('/', path_prefix='', expect_errors=True)
-
-        actual_msg = response.json['errors'][0]['detail']
-        self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg)
-
-    def test_hook_remote_error_success(self):
-        test_exc_type = 'TestException'
-        self.root_convert_mock.side_effect = messaging.rpc.RemoteError(
-            test_exc_type, self.MSG_WITHOUT_TRACE, self.TRACE)
-
-        response = self.get_json('/', path_prefix='', expect_errors=True)
-
-        # NOTE(max_lobur): For RemoteError the client message will still have
-        # some garbage because in RemoteError the traceback is serialized as
-        # a list instead of '\n'.join(trace). But since RemoteError is quite
-        # rare (it happens due to wrong deserialization settings etc.),
-        # we don't care about this garbage.
- if six.PY2: - expected_msg = ("Remote error: %s %s" - % (test_exc_type, self.MSG_WITHOUT_TRACE) - + "\n[u'") - else: - expected_msg = ("Remote error: %s %s" - % (test_exc_type, self.MSG_WITHOUT_TRACE) + "\n['") - actual_msg = response.json['errors'][0]['detail'] - self.assertEqual(expected_msg, actual_msg) - - def test_hook_without_traceback(self): - msg = "Error message without traceback \n but \n multiline" - self.root_convert_mock.side_effect = Exception(msg) - - response = self.get_json('/', path_prefix='', expect_errors=True) - - actual_msg = response.json['errors'][0]['detail'] - self.assertEqual(msg, actual_msg) - - def test_hook_server_debug_on_serverfault(self): - cfg.CONF.set_override('debug', True) - self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) - - response = self.get_json('/', path_prefix='', expect_errors=True) - - actual_msg = response.json['errors'][0]['detail'] - self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg) - - def test_hook_server_debug_on_clientfault(self): - cfg.CONF.set_override('debug', True) - client_error = Exception(self.MSG_WITH_TRACE) - client_error.code = 400 - self.root_convert_mock.side_effect = client_error - - response = self.get_json('/', path_prefix='', expect_errors=True) - - actual_msg = response.json['errors'][0]['detail'] - self.assertEqual(self.MSG_WITH_TRACE, actual_msg) diff --git a/magnum/tests/unit/api/test_servicegroup.py b/magnum/tests/unit/api/test_servicegroup.py deleted file mode 100644 index 6db4a628..00000000 --- a/magnum/tests/unit/api/test_servicegroup.py +++ /dev/null @@ -1,126 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -import mock -from oslo_utils import timeutils -import pytz - -from magnum.api import servicegroup as svc_grp -from magnum.tests.unit.api import base as api_base -from magnum.tests.unit.objects import utils as obj_util - - -class TestServiceGroup(api_base.FunctionalTest): - def setUp(self): - super(TestServiceGroup, self).setUp() - self.servicegroup_api = svc_grp.ServiceGroup() - - def test_service_is_up_check_type(self): - random_obj = mock.MagicMock() - self.assertRaises(TypeError, - self.servicegroup_api.service_is_up, random_obj) - - def test_service_is_up_forced_down(self): - kwarg = {'forced_down': True} - magnum_object = obj_util.get_test_magnum_service_object( - self.context, **kwarg) - is_up = self.servicegroup_api.service_is_up(magnum_object) - self.assertFalse(is_up) - - def test_service_is_up_alive(self): - kwarg = {'last_seen_up': timeutils.utcnow(True)} - magnum_object = obj_util.get_test_magnum_service_object( - self.context, **kwarg) - is_up = self.servicegroup_api.service_is_up(magnum_object) - self.assertTrue(is_up) - - def test_service_is_up_alive_with_created(self): - kwarg = {'created_at': timeutils.utcnow(True)} - magnum_object = obj_util.get_test_magnum_service_object( - self.context, **kwarg) - is_up = self.servicegroup_api.service_is_up(magnum_object) - self.assertTrue(is_up) - - def test_service_is_up_alive_with_updated(self): - kwarg = {'updated_at': timeutils.utcnow(True)} - magnum_object = obj_util.get_test_magnum_service_object( - self.context, **kwarg) - is_up = self.servicegroup_api.service_is_up(magnum_object) - self.assertTrue(is_up) - - def test_service_is_up_alive_with_all_three(self): - kwarg = {'created_at': timeutils.utcnow(True), - 'updated_at': timeutils.utcnow(True), - 'last_seen_up': timeutils.utcnow(True)} - magnum_object = obj_util.get_test_magnum_service_object( - self.context, **kwarg) - is_up = self.servicegroup_api.service_is_up(magnum_object) - self.assertTrue(is_up) - - def test_service_is_up_alive_with_latest_update(self): - kwarg = {'created_at': datetime.datetime(1970, 1, 1, - tzinfo=pytz.UTC), - 'updated_at': datetime.datetime(1970, 1, 1, - tzinfo=pytz.UTC), - 'last_seen_up': timeutils.utcnow(True)} - magnum_object = obj_util.get_test_magnum_service_object( - self.context, **kwarg) - is_up = self.servicegroup_api.service_is_up(magnum_object) - self.assertTrue(is_up) - - def test_service_is_up_down(self): - kwarg = {'last_seen_up': datetime.datetime(1970, 1, 1, - tzinfo=pytz.UTC)} - magnum_object = obj_util.get_test_magnum_service_object( - self.context, **kwarg) - is_up = self.servicegroup_api.service_is_up(magnum_object) - self.assertFalse(is_up) - - def test_service_is_up_down_with_create(self): - kwarg = {'created_at': datetime.datetime(1970, 1, 1, - tzinfo=pytz.UTC)} - magnum_object = obj_util.get_test_magnum_service_object( - self.context, **kwarg) - is_up = self.servicegroup_api.service_is_up(magnum_object) - self.assertFalse(is_up) - - def test_service_is_up_down_with_update(self): - kwarg = {'updated_at': datetime.datetime(1970, 1, 1, - tzinfo=pytz.UTC)} - magnum_object = obj_util.get_test_magnum_service_object( - self.context, **kwarg) - is_up = self.servicegroup_api.service_is_up(magnum_object) - self.assertFalse(is_up) - - def test_service_is_up_down_with_all_three(self): - kwarg = {'last_seen_up': datetime.datetime(1970, 1, 1, - tzinfo=pytz.UTC), - 'created_at': datetime.datetime(1970, 1, 1, - tzinfo=pytz.UTC), - 'updated_at': datetime.datetime(1970, 1, 1, - tzinfo=pytz.UTC)} - magnum_object = 
obj_util.get_test_magnum_service_object( - self.context, **kwarg) - is_up = self.servicegroup_api.service_is_up(magnum_object) - self.assertFalse(is_up) - - def test_service_is_up_down_with_old_update(self): - kwarg = {'last_seen_up': datetime.datetime(1970, 1, 1, - tzinfo=pytz.UTC), - 'created_at': timeutils.utcnow(True), - 'updated_at': timeutils.utcnow(True)} - magnum_object = obj_util.get_test_magnum_service_object( - self.context, **kwarg) - is_up = self.servicegroup_api.service_is_up(magnum_object) - self.assertFalse(is_up) diff --git a/magnum/tests/unit/api/test_validation.py b/magnum/tests/unit/api/test_validation.py deleted file mode 100644 index b1c1149a..00000000 --- a/magnum/tests/unit/api/test_validation.py +++ /dev/null @@ -1,366 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -from six.moves import reload_module - -from magnum.api import validation as v -from magnum.common import exception -import magnum.conf -from magnum import objects -from magnum.tests import base -from magnum.tests.unit.objects import utils as obj_utils - -CONF = magnum.conf.CONF - - -class TestValidation(base.BaseTestCase): - - def _test_enforce_cluster_type_supported( - self, mock_cluster_template_get_by_uuid, mock_cluster_get_by_uuid, - mock_pecan_request, cluster_type, assert_raised=False): - - @v.enforce_cluster_type_supported() - def test(self, cluster): - pass - - server_type, cluster_distro, coe = cluster_type - cluster_template = obj_utils.get_test_cluster_template( - mock_pecan_request.context, uuid='cluster_template_id', - coe=coe, cluster_distro=cluster_distro, server_type=server_type) - mock_cluster_template_get_by_uuid.return_value = cluster_template - - cluster = mock.MagicMock() - cluster.cluster_template_id = 'cluster_template_id' - cluster.cluster_template = cluster_template - mock_cluster_get_by_uuid.return_value = cluster - - if assert_raised: - return self.assertRaises( - exception.ClusterTypeNotSupported, test, self, cluster) - else: - self.assertIsNone(test(self, cluster)) - - @mock.patch('pecan.request') - @mock.patch('magnum.objects.Cluster.get_by_uuid') - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_enforce_cluster_type_supported( - self, mock_cluster_template_get_by_uuid, mock_cluster_get_by_uuid, - mock_pecan_request): - - cluster_type = ('vm', 'fedora-atomic', 'kubernetes') - self._test_enforce_cluster_type_supported( - mock_cluster_template_get_by_uuid, mock_cluster_get_by_uuid, - mock_pecan_request, cluster_type) - - @mock.patch('pecan.request') - @mock.patch('magnum.objects.Cluster.get_by_uuid') - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_enforce_cluster_type_not_supported( - self, mock_cluster_template_get_by_uuid, mock_cluster_get_by_uuid, - mock_pecan_request): - - cluster_type = ('vm', 'foo', 'kubernetes') - exc = self._test_enforce_cluster_type_supported( - mock_cluster_template_get_by_uuid, mock_cluster_get_by_uuid, - mock_pecan_request, 
cluster_type, assert_raised=True) - self.assertEqual('Cluster type (vm, foo, kubernetes) not supported.', - exc.message) - - def _test_enforce_network_driver_types_create( - self, - network_driver_type, - network_driver_config_dict, - coe='kubernetes', - assert_raised=False): - - @v.enforce_network_driver_types_create() - def test(self, cluster_template): - pass - - for key, val in network_driver_config_dict.items(): - CONF.set_override(key, val, 'cluster_template') - - cluster_template = mock.MagicMock() - cluster_template.name = 'test_cluster_template' - cluster_template.network_driver = network_driver_type - cluster_template.coe = coe - - # Reload the validator module so that ClusterTemplate configs are - # re-evaluated. - reload_module(v) - validator = v.K8sValidator - validator.supported_network_drivers = ['flannel', 'type1', 'type2'] - - if assert_raised: - self.assertRaises(exception.InvalidParameterValue, - test, self, cluster_template) - else: - test(self, cluster_template) - return cluster_template - - def test_enforce_network_driver_types_one_allowed_create(self): - self._test_enforce_network_driver_types_create( - network_driver_type='type1', - network_driver_config_dict={ - 'kubernetes_allowed_network_drivers': ['type1']}) - - def test_enforce_network_driver_types_two_allowed_create(self): - self._test_enforce_network_driver_types_create( - network_driver_type='type1', - network_driver_config_dict={ - 'kubernetes_allowed_network_drivers': ['type1', 'type2']}) - - def test_enforce_network_driver_types_not_allowed_create(self): - self._test_enforce_network_driver_types_create( - network_driver_type='type1', - network_driver_config_dict={ - 'kubernetes_allowed_network_drivers': ['type2']}, - assert_raised=True) - - def test_enforce_network_driver_types_all_allowed_create(self): - for driver in ['flannel', 'type1', 'type2']: - self._test_enforce_network_driver_types_create( - network_driver_type=driver, - network_driver_config_dict={ - 'kubernetes_allowed_network_drivers': ['all']}) - - def test_enforce_network_driver_types_invalid_coe_create(self): - self._test_enforce_network_driver_types_create( - network_driver_type='flannel', - network_driver_config_dict={}, - coe='invalid_coe_type', - assert_raised=True) - - def test_enforce_network_driver_types_default_create(self): - cluster_template = self._test_enforce_network_driver_types_create( - network_driver_type=None, - network_driver_config_dict={}) - self.assertEqual('flannel', cluster_template.network_driver) - - def test_enforce_network_driver_types_default_config_create(self): - cluster_template = self._test_enforce_network_driver_types_create( - network_driver_type=None, - network_driver_config_dict={ - 'kubernetes_default_network_driver': 'type1'}) - self.assertEqual('type1', cluster_template.network_driver) - - def test_enforce_network_driver_types_default_invalid_create(self): - self._test_enforce_network_driver_types_create( - network_driver_type=None, - network_driver_config_dict={ - 'kubernetes_default_network_driver': 'invalid_driver'}, - assert_raised=True) - - @mock.patch('pecan.request') - @mock.patch('magnum.api.utils.get_resource') - def _test_enforce_network_driver_types_update( - self, - mock_get_resource, - mock_pecan_request, - network_driver_type, - network_driver_config_dict, - assert_raised=False): - - @v.enforce_network_driver_types_update() - def test(self, cluster_template_ident, patch): - pass - - for key, val in network_driver_config_dict.items(): - CONF.set_override(key, val, 'cluster_template') - - 
cluster_template_ident = 'test_uuid_or_name' - - patch = [{'path': '/network_driver', 'value': network_driver_type, - 'op': 'replace'}] - context = mock_pecan_request.context - cluster_template = obj_utils.get_test_cluster_template( - context, uuid=cluster_template_ident, coe='kubernetes') - cluster_template.network_driver = network_driver_type - mock_get_resource.return_value = cluster_template - - # Reload the validator module so that ClusterTemplate configs are - # re-evaluated. - reload_module(v) - validator = v.K8sValidator - validator.supported_network_drivers = ['flannel', 'type1', 'type2'] - - if assert_raised: - self.assertRaises(exception.InvalidParameterValue, - test, self, cluster_template_ident, patch) - else: - test(self, cluster_template_ident, patch) - mock_get_resource.assert_called_once_with( - 'ClusterTemplate', cluster_template_ident) - - def test_enforce_network_driver_types_one_allowed_update(self): - self._test_enforce_network_driver_types_update( - network_driver_type='type1', - network_driver_config_dict={ - 'kubernetes_allowed_network_drivers': ['type1']}) - - def test_enforce_network_driver_types_two_allowed_update(self): - self._test_enforce_network_driver_types_update( - network_driver_type='type1', - network_driver_config_dict={ - 'kubernetes_allowed_network_drivers': ['type1', 'type2']}) - - def test_enforce_network_driver_types_not_allowed_update(self): - self._test_enforce_network_driver_types_update( - network_driver_type='type1', - network_driver_config_dict={ - 'kubernetes_allowed_network_drivers': ['type2']}, - assert_raised=True) - - def test_enforce_network_driver_types_all_allowed_update(self): - for driver in ['flannel', 'type1', 'type2']: - self._test_enforce_network_driver_types_update( - network_driver_type=driver, - network_driver_config_dict={ - 'kubernetes_allowed_network_drivers': ['all']}) - - def _test_enforce_volume_driver_types_create( - self, - volume_driver_type, - coe='kubernetes', - assert_raised=False): - - @v.enforce_volume_driver_types_create() - def test(self, cluster_template): - pass - - cluster_template = obj_utils.get_test_cluster_template( - {}, name='test_cluster_template', coe=coe, - volume_driver=volume_driver_type) - - if assert_raised: - self.assertRaises(exception.InvalidParameterValue, - test, self, cluster_template) - else: - test(self, cluster_template) - - def test_enforce_volume_driver_types_valid_create(self): - self._test_enforce_volume_driver_types_create( - volume_driver_type='cinder') - - def test_enforce_volume_driver_types_invalid_create(self): - self._test_enforce_volume_driver_types_create( - volume_driver_type='type', - assert_raised=True) - - def _test_enforce_server_type( - self, - server_type, - coe='kubernetes', - assert_raised=False): - - @v.enforce_server_type() - def test(self, cluster_template): - pass - - cluster_template = obj_utils.get_test_cluster_template( - {}, name='test_cluster_template', coe=coe, - server_type=server_type) - - if assert_raised: - self.assertRaises(exception.InvalidParameterValue, - test, self, cluster_template) - else: - test(self, cluster_template) - - def test_enforce_server_type_valid_vm(self): - self._test_enforce_server_type( - server_type='vm') - - def test_enforce_server_type_valid_bm(self): - self._test_enforce_server_type( - server_type='bm') - - def test_enforce_server_type_invalid(self): - self._test_enforce_server_type( - server_type='invalid', - assert_raised=True) - - @mock.patch('pecan.request') - @mock.patch('magnum.api.utils.get_resource') - def 
_test_enforce_volume_driver_types_update( - self, - mock_get_resource, - mock_pecan_request, - volume_driver_type, - op, - assert_raised=False): - - @v.enforce_volume_driver_types_update() - def test(self, cluster_template_ident, patch): - pass - - cluster_template_ident = 'test_uuid_or_name' - patch = [{'path': '/volume_driver', 'value': volume_driver_type, - 'op': op}] - context = mock_pecan_request.context - cluster_template = obj_utils.get_test_cluster_template( - context, uuid=cluster_template_ident, coe='kubernetes') - mock_get_resource.return_value = cluster_template - - # Reload the validator module so that ClusterTemplate configs are - # re-evaluated. - reload_module(v) - validator = v.K8sValidator - validator.supported_volume_driver = ['cinder'] - - if assert_raised: - self.assertRaises(exception.InvalidParameterValue, - test, self, cluster_template_ident, patch) - else: - test(self, cluster_template_ident, patch) - mock_get_resource.assert_called_once_with( - 'ClusterTemplate', cluster_template_ident) - - def test_enforce_volume_driver_types_supported_replace_update(self): - self._test_enforce_volume_driver_types_update( - volume_driver_type='cinder', - op='replace') - - def test_enforce_volume_driver_types_not_supported_replace_update(self): - self._test_enforce_volume_driver_types_update( - volume_driver_type='type1', - op='replace', - assert_raised=True) - - def test_enforce_volume_driver_types_supported_add_update(self): - self._test_enforce_volume_driver_types_update( - volume_driver_type='cinder', - op='add') - - def test_enforce_volume_driver_types_not_supported_add_update(self): - self._test_enforce_volume_driver_types_update( - volume_driver_type='type1', - op='add', - assert_raised=True) - - def test_enforce_volume_driver_types_remove_update(self): - self._test_enforce_volume_driver_types_update( - volume_driver_type='cinder', - op='remove') - - def test_validate_cluster_properties(self): - allowed_properties = v.cluster_update_allowed_properties - for field in objects.Cluster.fields: - if field in allowed_properties: - v.validate_cluster_properties(set([field])) - else: - self.assertRaises(exception.InvalidParameterValue, - v.validate_cluster_properties, set([field])) diff --git a/magnum/tests/unit/api/utils.py b/magnum/tests/unit/api/utils.py deleted file mode 100644 index 2ad12936..00000000 --- a/magnum/tests/unit/api/utils.py +++ /dev/null @@ -1,88 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Utils for testing the API service. 
-""" -import datetime - -import pytz - -from magnum.api.controllers.v1 import bay as bay_controller -from magnum.api.controllers.v1 import baymodel as baymodel_controller -from magnum.api.controllers.v1 import cluster as cluster_controller -from magnum.api.controllers.v1 import cluster_template as cluster_tmp_ctrl -from magnum.tests.unit.db import utils - - -def remove_internal(values, internal): - # NOTE(yuriyz): internal attributes should not be posted, except uuid - int_attr = [attr.lstrip('/') for attr in internal if attr != '/uuid'] - return {k: v for (k, v) in values.items() if k not in int_attr} - - -def baymodel_post_data(**kw): - baymodel = utils.get_test_cluster_template(**kw) - internal = baymodel_controller.BayModelPatchType.internal_attrs() - return remove_internal(baymodel, internal) - - -def cluster_template_post_data(**kw): - cluster_template = utils.get_test_cluster_template(**kw) - internal = cluster_tmp_ctrl.ClusterTemplatePatchType.internal_attrs() - return remove_internal(cluster_template, internal) - - -def bay_post_data(**kw): - bay = utils.get_test_cluster(**kw) - bay['baymodel_id'] = kw.get('baymodel_id', bay['cluster_template_id']) - bay['bay_create_timeout'] = kw.get('bay_create_timeout', 15) - del bay['cluster_template_id'] - del bay['create_timeout'] - internal = bay_controller.BayPatchType.internal_attrs() - return remove_internal(bay, internal) - - -def cluster_post_data(**kw): - cluster = utils.get_test_cluster(**kw) - cluster['create_timeout'] = kw.get('create_timeout', 15) - internal = cluster_controller.ClusterPatchType.internal_attrs() - return remove_internal(cluster, internal) - - -def cert_post_data(**kw): - return { - 'cluster_uuid': kw.get('cluster_uuid', - '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'), - 'csr': kw.get('csr', 'fake-csr'), - 'pem': kw.get('pem', 'fake-pem') - } - - -def quota_post_data(**kw): - return utils.get_test_quota(**kw) - - -def mservice_get_data(**kw): - """Simulate what the RPC layer will get from DB """ - faketime = datetime.datetime(2001, 1, 1, tzinfo=pytz.UTC) - return { - 'binary': kw.get('binary', 'magnum-conductor'), - 'host': kw.get('host', 'fake-host'), - 'id': kw.get('id', 13), - 'report_count': kw.get('report_count', 13), - 'disabled': kw.get('disabled', False), - 'disabled_reason': kw.get('disabled_reason', None), - 'forced_down': kw.get('forced_down', False), - 'last_seen_at': kw.get('last_seen_at', faketime), - 'created_at': kw.get('created_at', faketime), - 'updated_at': kw.get('updated_at', faketime), - } diff --git a/magnum/tests/unit/cmd/__init__.py b/magnum/tests/unit/cmd/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/cmd/test_api.py b/magnum/tests/unit/cmd/test_api.py deleted file mode 100644 index ea55eadb..00000000 --- a/magnum/tests/unit/cmd/test_api.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2016 - Fujitsu, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from oslo_concurrency import processutils - -from magnum.cmd import api -from magnum.tests import base - - -# NOTE(hieulq): need to mock MagnumObject, otherwise other test cases -# will be failed because of setting wrong ovo indirection api -@mock.patch('magnum.objects.base.MagnumObject') -class TestMagnumAPI(base.TestCase): - - @mock.patch('werkzeug.serving.run_simple') - @mock.patch.object(api, 'api_app') - @mock.patch('magnum.common.service.prepare_service') - def test_api_http(self, mock_prep, mock_app, mock_run, mock_base): - api.main() - - app = mock_app.load_app.return_value - mock_prep.assert_called_once_with(mock.ANY) - mock_app.load_app.assert_called_once_with() - workers = processutils.get_worker_count() - mock_run.assert_called_once_with(base.CONF.api.host, - base.CONF.api.port, - app, processes=workers, - ssl_context=None) - - @mock.patch('werkzeug.serving.run_simple') - @mock.patch.object(api, 'api_app') - @mock.patch('magnum.common.service.prepare_service') - def test_api_http_config_workers(self, mock_prep, mock_app, - mock_run, mock_base): - fake_workers = 8 - self.config(workers=fake_workers, group='api') - api.main() - - app = mock_app.load_app.return_value - mock_prep.assert_called_once_with(mock.ANY) - mock_app.load_app.assert_called_once_with() - mock_run.assert_called_once_with(base.CONF.api.host, - base.CONF.api.port, - app, processes=fake_workers, - ssl_context=None) - - @mock.patch('os.path.exists') - @mock.patch('werkzeug.serving.run_simple') - @mock.patch.object(api, 'api_app') - @mock.patch('magnum.common.service.prepare_service') - def test_api_https_no_cert(self, mock_prep, mock_app, mock_run, - mock_exist, mock_base): - self.config(enabled_ssl=True, - ssl_cert_file='tmp_crt', - group='api') - mock_exist.return_value = False - - self.assertRaises(RuntimeError, api.main) - mock_prep.assert_called_once_with(mock.ANY) - mock_app.load_app.assert_called_once_with() - mock_run.assert_not_called() - mock_exist.assert_called_once_with('tmp_crt') - - @mock.patch('os.path.exists') - @mock.patch('werkzeug.serving.run_simple') - @mock.patch.object(api, 'api_app') - @mock.patch('magnum.common.service.prepare_service') - def test_api_https_no_key(self, mock_prep, mock_app, mock_run, - mock_exist, mock_base): - self.config(enabled_ssl=True, - ssl_cert_file='tmp_crt', - ssl_key_file='tmp_key', - group='api') - mock_exist.side_effect = [True, False] - - self.assertRaises(RuntimeError, api.main) - mock_prep.assert_called_once_with(mock.ANY) - mock_app.load_app.assert_called_once_with() - mock_run.assert_not_called() - mock_exist.assert_has_calls([mock.call('tmp_crt'), - mock.call('tmp_key')]) - - @mock.patch('os.path.exists') - @mock.patch('werkzeug.serving.run_simple') - @mock.patch.object(api, 'api_app') - @mock.patch('magnum.common.service.prepare_service') - def test_api_https(self, mock_prep, mock_app, mock_run, - mock_exist, mock_base): - self.config(enabled_ssl=True, - ssl_cert_file='tmp_crt', - ssl_key_file='tmp_key', - group='api') - mock_exist.side_effect = [True, True] - - api.main() - - app = mock_app.load_app.return_value - mock_prep.assert_called_once_with(mock.ANY) - mock_app.load_app.assert_called_once_with() - mock_exist.assert_has_calls([mock.call('tmp_crt'), - mock.call('tmp_key')]) - workers = processutils.get_worker_count() - mock_run.assert_called_once_with(base.CONF.api.host, - base.CONF.api.port, app, - processes=workers, - ssl_context=('tmp_crt', 'tmp_key')) diff --git a/magnum/tests/unit/cmd/test_conductor.py 
b/magnum/tests/unit/cmd/test_conductor.py deleted file mode 100644 index c46c0f5d..00000000 --- a/magnum/tests/unit/cmd/test_conductor.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2016 - Fujitsu, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from oslo_concurrency import processutils - -from magnum.cmd import conductor -from magnum.tests import base - - -class TestMagnumConductor(base.TestCase): - - @mock.patch('oslo_service.service.launch') - @mock.patch.object(conductor, 'rpc_service') - @mock.patch('magnum.common.service.prepare_service') - def test_conductor(self, mock_prep, mock_rpc, mock_launch): - conductor.main() - - server = mock_rpc.Service.create.return_value - launcher = mock_launch.return_value - mock_prep.assert_called_once_with(mock.ANY) - mock_rpc.Service.create.assert_called_once_with( - base.CONF.conductor.topic, - mock.ANY, mock.ANY, binary='magnum-conductor') - workers = processutils.get_worker_count() - mock_launch.assert_called_once_with(base.CONF, server, - workers=workers) - launcher.wait.assert_called_once_with() - - @mock.patch('oslo_service.service.launch') - @mock.patch.object(conductor, 'rpc_service') - @mock.patch('magnum.common.service.prepare_service') - def test_conductor_config_workers(self, mock_prep, mock_rpc, mock_launch): - fake_workers = 8 - self.config(workers=fake_workers, group='conductor') - conductor.main() - - server = mock_rpc.Service.create.return_value - launcher = mock_launch.return_value - mock_prep.assert_called_once_with(mock.ANY) - mock_rpc.Service.create.assert_called_once_with( - base.CONF.conductor.topic, - mock.ANY, mock.ANY, binary='magnum-conductor') - mock_launch.assert_called_once_with(base.CONF, server, - workers=fake_workers) - launcher.wait.assert_called_once_with() diff --git a/magnum/tests/unit/cmd/test_db_manage.py b/magnum/tests/unit/cmd/test_db_manage.py deleted file mode 100644 index ae81ca1e..00000000 --- a/magnum/tests/unit/cmd/test_db_manage.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2016 - Fujitsu, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock -import six - -from magnum.cmd import db_manage -from magnum.tests import base - - -class TestMagnumDbManage(base.TestCase): - - def setUp(self): - super(TestMagnumDbManage, self).setUp() - - def clear_conf(): - db_manage.CONF.reset() - db_manage.CONF.unregister_opt(db_manage.command_opt) - - clear_conf() - self.addCleanup(clear_conf) - - @mock.patch('magnum.db.migration.version') - @mock.patch('sys.argv', ['magnum-db-manage', 'version']) - def test_db_manage_version(self, mock_version): - with mock.patch('sys.stdout', new=six.StringIO()) as fakeOutput: - mock_version.return_value = '123456' - db_manage.main() - self.assertEqual('Current DB revision is 123456\n', - fakeOutput.getvalue()) - mock_version.assert_called_once_with() - - @mock.patch('magnum.db.migration.upgrade') - @mock.patch('sys.argv', ['magnum-db-manage', 'upgrade']) - def test_db_manage_upgrade(self, mock_upgrade): - db_manage.main() - mock_upgrade.assert_called_once_with(base.CONF.command.revision) - - @mock.patch('magnum.db.migration.stamp') - @mock.patch('sys.argv', ['magnum-db-manage', 'stamp', 'foo bar']) - def test_db_manage_stamp(self, mock_stamp): - db_manage.main() - mock_stamp.assert_called_once_with('foo bar') - - @mock.patch('magnum.db.migration.revision') - @mock.patch('sys.argv', ['magnum-db-manage', 'revision', '-m', 'foo bar']) - def test_db_manage_revision(self, mock_revision): - db_manage.main() - mock_revision.assert_called_once_with( - message='foo bar', - autogenerate=base.CONF.command.autogenerate) diff --git a/magnum/tests/unit/cmd/test_driver_manage.py b/magnum/tests/unit/cmd/test_driver_manage.py deleted file mode 100644 index 6c99906a..00000000 --- a/magnum/tests/unit/cmd/test_driver_manage.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2016 - Fujitsu, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from magnum.cmd import driver_manage -from magnum.tests import base - - -class TestMagnumDriverManage(base.TestCase): - - # Fake entrypoints method - @staticmethod - def _fake_entry(num_of_entries): - while num_of_entries: - fake_entry = mock.MagicMock() - fake_entry.name = 'magnum_' + 'test_' + \ - 'foo_' + 'bar'*num_of_entries - fake_cls = mock.MagicMock() - fake_definition = fake_cls() - fake_definition.provides = [{'coe': 'foo', 'os': 'bar', - 'server_type': 'test'}] - fake_definition.get_template_definition.return_value = \ - mock.MagicMock(template_path='fake_path') - yield fake_entry, fake_cls - num_of_entries -= 1 - - @mock.patch.object(driver_manage.DriverManager, 'run') - @mock.patch('sys.argv', ['foo', 'bar']) - def test_none_arg(self, mock_run): - args = None - driver_manage.main(args) - mock_run.assert_called_once_with(['bar']) - - # NOTE(hieulq): we fake the entrypoints then we need to mock the cliff - # produce_output in order to assert with fake value - @mock.patch('magnum.cmd.driver_manage.DriverList.produce_output') - @mock.patch('magnum.drivers.common.driver.Driver') - def test_correct_arg_with_details_and_path(self, mock_driver, - mock_produce): - args = ['list-drivers', '-d', '-p'] - mock_driver.load_entry_points.return_value = self._fake_entry(1) - driver_manage.main(args) - mock_driver.load_entry_points.assert_called_once_with() - mock_produce.assert_called_once_with(mock.ANY, mock.ANY, - [('magnum_test_foo_bar', - 'test', - 'bar', 'foo', 'fake_path')]) - - # NOTE(hieulq): we fake the entrypoints then we need to mock the cliff - # produce_output in order to assert with fake value - @mock.patch('magnum.cmd.driver_manage.DriverList.produce_output') - @mock.patch('magnum.drivers.common.driver.Driver') - def test_correct_arg_without_details_and_path(self, mock_driver, - mock_produce): - args = ['list-drivers'] - mock_driver.load_entry_points.return_value = self._fake_entry(1) - driver_manage.main(args) - mock_driver.load_entry_points.assert_called_once_with() - mock_produce.assert_called_once_with(mock.ANY, mock.ANY, - [('magnum_test_foo_bar',)]) diff --git a/magnum/tests/unit/common/__init__.py b/magnum/tests/unit/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/common/cert_manager/__init__.py b/magnum/tests/unit/common/cert_manager/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/common/cert_manager/test_barbican.py b/magnum/tests/unit/common/cert_manager/test_barbican.py deleted file mode 100644 index 5f99baf4..00000000 --- a/magnum/tests/unit/common/cert_manager/test_barbican.py +++ /dev/null @@ -1,296 +0,0 @@ -# Copyright 2014, 2015 Rackspace US, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
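
The _fake_entry staticmethod is the noteworthy part of this file: instead of installing real setuptools entry points, it yields (entry, class) pairs built from MagicMock objects. A stripped-down version of the trick (list_drivers is a hypothetical consumer, not Magnum's DriverManager)::

    import unittest
    from unittest import mock


    def list_drivers(entry_points):
        """Collect (name, template_path) rows from driver entry points."""
        rows = []
        for entry, cls in entry_points:
            definition = cls()
            tdef = definition.get_template_definition()
            rows.append((entry.name, tdef.template_path))
        return rows


    def fake_entries(count):
        for i in range(count):
            entry = mock.MagicMock()
            # Set .name after construction: MagicMock(name=...) would
            # name the mock object itself instead of its .name attribute.
            entry.name = 'driver_%d' % i
            cls = mock.MagicMock()
            cls.return_value.get_template_definition.return_value = \
                mock.MagicMock(template_path='fake_path')
            yield entry, cls


    class TestListDrivers(unittest.TestCase):
        def test_rows_from_fake_entry_points(self):
            self.assertEqual([('driver_0', 'fake_path')],
                             list_drivers(fake_entries(1)))


    if __name__ == '__main__':
        unittest.main()
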
- -import uuid - -from barbicanclient import client as barbican_client -from barbicanclient import containers -from barbicanclient import secrets -import mock -from mock import patch - -from magnum.common.cert_manager import barbican_cert_manager as bcm -from magnum.common.cert_manager import cert_manager -from magnum.common import exception as magnum_exc -from magnum.tests import base - - -class TestBarbicanCert(base.BaseTestCase): - - def setUp(self): - # Certificate data - self.certificate = "My Certificate" - self.intermediates = "My Intermediates" - self.private_key = "My Private Key" - self.private_key_passphrase = "My Private Key Passphrase" - - self.certificate_secret = barbican_client.secrets.Secret( - api=mock.MagicMock(), - payload=self.certificate - ) - self.intermediates_secret = barbican_client.secrets.Secret( - api=mock.MagicMock(), - payload=self.intermediates - ) - self.private_key_secret = barbican_client.secrets.Secret( - api=mock.MagicMock(), - payload=self.private_key - ) - self.private_key_passphrase_secret = barbican_client.secrets.Secret( - api=mock.MagicMock(), - payload=self.private_key_passphrase - ) - - super(TestBarbicanCert, self).setUp() - - def test_barbican_cert(self): - container = barbican_client.containers.CertificateContainer( - api=mock.MagicMock(), - certificate=self.certificate_secret, - intermediates=self.intermediates_secret, - private_key=self.private_key_secret, - private_key_passphrase=self.private_key_passphrase_secret - ) - # Create a cert - cert = bcm.Cert( - cert_container=container - ) - - # Validate the cert functions - self.assertEqual(self.certificate, cert.get_certificate()) - self.assertEqual(self.intermediates, cert.get_intermediates()) - self.assertEqual(self.private_key, cert.get_private_key()) - self.assertEqual(self.private_key_passphrase, - cert.get_private_key_passphrase()) - - def test_barbican_cert_none_values(self): - container = barbican_client.containers.CertificateContainer( - api=mock.MagicMock(), - certificate=None, - intermediates=None, - private_key=None, - private_key_passphrase=None - ) - # Create a cert - cert = bcm.Cert( - cert_container=container - ) - - # Validate the cert functions - self.assertIsNone(cert.get_certificate()) - self.assertIsNone(cert.get_intermediates()) - self.assertIsNone(cert.get_private_key()) - self.assertIsNone(cert.get_private_key_passphrase()) - - -class TestBarbicanManager(base.BaseTestCase): - - def setUp(self): - # Make a fake Container and contents - self.barbican_endpoint = 'http://localhost:9311/v1' - self.container_uuid = uuid.uuid4() - - self.container_ref = '{0}/containers/{1}'.format( - self.barbican_endpoint, self.container_uuid - ) - - self.name = 'My Fancy Cert' - self.private_key = mock.Mock(spec=secrets.Secret) - self.certificate = mock.Mock(spec=secrets.Secret) - self.intermediates = mock.Mock(spec=secrets.Secret) - self.private_key_passphrase = mock.Mock(spec=secrets.Secret) - - container = mock.Mock(spec=containers.CertificateContainer) - container.container_ref = self.container_ref - container.name = self.name - container.private_key = self.private_key - container.certificate = self.certificate - container.intermediates = self.intermediates - container.private_key_passphrase = self.private_key_passphrase - self.container = container - - self.empty_container = mock.Mock(spec=containers.CertificateContainer) - - self.secret1 = mock.Mock(spec=secrets.Secret) - self.secret2 = mock.Mock(spec=secrets.Secret) - self.secret3 = mock.Mock(spec=secrets.Secret) - self.secret4 = 
mock.Mock(spec=secrets.Secret) - - super(TestBarbicanManager, self).setUp() - - @patch('magnum.common.clients.OpenStackClients.barbican') - def test_store_cert(self, mock_barbican): - # Mock out the client - bc = mock.MagicMock() - bc.containers.create_certificate.return_value = self.empty_container - mock_barbican.return_value = bc - - # Attempt to store a cert - bcm.CertManager.store_cert( - certificate=self.certificate, - private_key=self.private_key, - intermediates=self.intermediates, - private_key_passphrase=self.private_key_passphrase, - name=self.name - ) - - # create_secret should be called four times with our data - calls = [ - mock.call(payload=self.certificate, expiration=None, - name=mock.ANY), - mock.call(payload=self.private_key, expiration=None, - name=mock.ANY), - mock.call(payload=self.intermediates, expiration=None, - name=mock.ANY), - mock.call(payload=self.private_key_passphrase, expiration=None, - name=mock.ANY) - ] - bc.secrets.create.assert_has_calls(calls, any_order=True) - - # create_certificate should be called once - self.assertEqual(1, bc.containers.create_certificate.call_count) - - # Container should be stored once - self.empty_container.store.assert_called_once_with() - - @patch('magnum.common.clients.OpenStackClients.barbican') - def test_store_cert_failure(self, mock_barbican): - # Mock out the client - bc = mock.MagicMock() - bc.containers.create_certificate.return_value = self.empty_container - test_secrets = [ - self.secret1, - self.secret2, - self.secret3, - self.secret4 - ] - bc.secrets.create.side_effect = test_secrets - self.empty_container.store.side_effect =\ - magnum_exc.CertificateStorageException - mock_barbican.return_value = bc - - # Attempt to store a cert - self.assertRaises( - magnum_exc.CertificateStorageException, - bcm.CertManager.store_cert, - certificate=self.certificate, - private_key=self.private_key, - intermediates=self.intermediates, - private_key_passphrase=self.private_key_passphrase, - name=self.name - ) - - # create_secret should be called four times with our data - calls = [ - mock.call(payload=self.certificate, expiration=None, - name=mock.ANY), - mock.call(payload=self.private_key, expiration=None, - name=mock.ANY), - mock.call(payload=self.intermediates, expiration=None, - name=mock.ANY), - mock.call(payload=self.private_key_passphrase, expiration=None, - name=mock.ANY) - ] - bc.secrets.create.assert_has_calls(calls, any_order=True) - - # create_certificate should be called once - self.assertEqual(1, bc.containers.create_certificate.call_count) - - # Container should be stored once - self.empty_container.store.assert_called_once_with() - - # All secrets should be deleted (or at least an attempt made) - for s in test_secrets: - s.delete.assert_called_once_with() - - @patch('magnum.common.clients.OpenStackClients.barbican') - def test_get_cert(self, mock_barbican): - # Mock out the client - bc = mock.MagicMock() - bc.containers.register_consumer.return_value = self.container - mock_barbican.return_value = bc - - # Get the container data - data = bcm.CertManager.get_cert( - cert_ref=self.container_ref, - resource_ref=self.container_ref, - service_name='Magnum' - ) - - # 'register_consumer' should be called once with the container_ref - bc.containers.register_consumer.assert_called_once_with( - container_ref=self.container_ref, - url=self.container_ref, - name='Magnum' - ) - - # The returned data should be a Cert object with the correct values - self.assertIsInstance(data, cert_manager.Cert) - 
self.assertEqual(self.private_key.payload, - data.get_private_key()) - self.assertEqual(self.certificate.payload, - data.get_certificate()) - self.assertEqual(self.intermediates.payload, - data.get_intermediates()) - self.assertEqual(self.private_key_passphrase.payload, - data.get_private_key_passphrase()) - - @patch('magnum.common.clients.OpenStackClients.barbican') - def test_get_cert_no_registration(self, mock_barbican): - # Mock out the client - bc = mock.MagicMock() - bc.containers.get.return_value = self.container - mock_barbican.return_value = bc - - # Get the container data - data = bcm.CertManager.get_cert( - cert_ref=self.container_ref, check_only=True - ) - - # 'get' should be called once with the container_ref - bc.containers.get.assert_called_once_with( - container_ref=self.container_ref - ) - - # The returned data should be a Cert object with the correct values - self.assertIsInstance(data, cert_manager.Cert) - self.assertEqual(self.private_key.payload, - data.get_private_key()) - self.assertEqual(self.certificate.payload, - data.get_certificate()) - self.assertEqual(self.intermediates.payload, - data.get_intermediates()) - self.assertEqual(self.private_key_passphrase.payload, - data.get_private_key_passphrase()) - - @patch('magnum.common.clients.OpenStackClients.barbican') - def test_delete_cert(self, mock_barbican): - # Mock out the client - bc = mock.MagicMock() - bc.containers.get.return_value = self.container - mock_barbican.return_value = bc - - # Attempt to delete a cert - bcm.CertManager.delete_cert( - cert_ref=self.container_ref - ) - - # All secrets should be deleted - self.container.certificate.delete.assert_called_once_with() - self.container.private_key.delete.assert_called_once_with() - self.container.intermediates.delete.assert_called_once_with() - self.container.private_key_passphrase.delete.assert_called_once_with() - - # Container should be deleted once - self.container.delete.assert_called_once_with() diff --git a/magnum/tests/unit/common/cert_manager/test_cert_manager.py b/magnum/tests/unit/common/cert_manager/test_cert_manager.py deleted file mode 100644 index 8ebca884..00000000 --- a/magnum/tests/unit/common/cert_manager/test_cert_manager.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
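
test_store_cert_failure above encodes a contract rather than an implementation detail: if storing the certificate container fails, every secret created along the way must be deleted. A minimal sketch of that create-then-roll-back shape (store_cert below is illustrative; the real logic lives in magnum.common.cert_manager.barbican_cert_manager)::

    import unittest
    from unittest import mock


    def store_cert(client, certificate, private_key):
        """Create secrets, then a container; roll secrets back on failure."""
        secrets = [client.secrets.create(payload=certificate),
                   client.secrets.create(payload=private_key)]
        container = client.containers.create_certificate()
        try:
            container.store()
        except Exception:
            for secret in secrets:
                secret.delete()  # best-effort cleanup, as the tests assert
            raise


    class TestStoreCertRollback(unittest.TestCase):
        def test_secrets_deleted_when_store_fails(self):
            client = mock.MagicMock()
            s1, s2 = mock.MagicMock(), mock.MagicMock()
            client.secrets.create.side_effect = [s1, s2]
            container = client.containers.create_certificate.return_value
            container.store.side_effect = RuntimeError
            self.assertRaises(RuntimeError, store_cert, client, 'crt', 'key')
            s1.delete.assert_called_once_with()
            s2.delete.assert_called_once_with()


    if __name__ == '__main__':
        unittest.main()
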
- -import mock -from oslo_config import fixture - -from magnum.common import cert_manager -from magnum.common.cert_manager import barbican_cert_manager as bcm -from magnum.common.cert_manager import cert_manager as cert_manager_iface -from magnum.common.cert_manager import get_backend -from magnum.common.cert_manager import local_cert_manager as lcm -from magnum.tests import base - - -class FakeCert(cert_manager_iface.Cert): - def get_certificate(self): - return 'fake-cert' - - def get_intermediates(self): - return 'fake-intermediates' - - def get_private_key(self): - return 'fake-private-key' - - def get_private_key_passphrase(self): - return 'fake-passphrase' - - -class TestCert(base.BaseTestCase): - @mock.patch.object(cert_manager_iface, 'operations') - def test_get_decrypted_private_key(self, mock_x509_ops): - mock_x509_ops.decrypt_key.return_value = 'fake-key' - fake_cert = FakeCert() - decrypted_key = fake_cert.get_decrypted_private_key() - self.assertEqual('fake-key', decrypted_key) - mock_x509_ops.decrypt_key.assert_called_once_with('fake-private-key', - 'fake-passphrase') - - -class TestCertManager(base.BaseTestCase): - - def setUp(self): - cert_manager._CERT_MANAGER_PLUGIN = None - super(TestCertManager, self).setUp() - - def test_barbican_cert_manager(self): - fixture.Config().config(group='certificates', - cert_manager_type='barbican') - self.assertEqual(get_backend().CertManager, - bcm.CertManager) - - def test_local_cert_manager(self): - fixture.Config().config(group='certificates', - cert_manager_type='local') - self.assertEqual(get_backend().CertManager, - lcm.CertManager) diff --git a/magnum/tests/unit/common/cert_manager/test_local.py b/magnum/tests/unit/common/cert_manager/test_local.py deleted file mode 100644 index 9c44f2b5..00000000 --- a/magnum/tests/unit/common/cert_manager/test_local.py +++ /dev/null @@ -1,284 +0,0 @@ -# Copyright 2014 Rackspace US, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
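
TestCertManager above only checks that get_backend() honours the [certificates]cert_manager_type option. The shape behind it is a registry keyed by a config string; a toy version (the _BACKENDS dict and get_backend below are illustrative; Magnum resolves the name through its own plugin loading)::

    import unittest


    class BarbicanCertManager(object):
        pass


    class LocalCertManager(object):
        pass


    _BACKENDS = {'barbican': BarbicanCertManager,
                 'local': LocalCertManager}


    def get_backend(cert_manager_type):
        """Resolve a cert manager class from a config value."""
        return _BACKENDS[cert_manager_type]


    class TestGetBackend(unittest.TestCase):
        def test_barbican(self):
            self.assertIs(BarbicanCertManager, get_backend('barbican'))

        def test_local(self):
            self.assertIs(LocalCertManager, get_backend('local'))


    if __name__ == '__main__':
        unittest.main()
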
-import os - -import mock -from oslo_config import cfg -from oslo_config import fixture as oslo_fixture - -from magnum.common.cert_manager import cert_manager -from magnum.common.cert_manager import local_cert_manager -from magnum.common import exception -from magnum.tests import base - - -class TestLocalCert(base.BaseTestCase): - - def setUp(self): - self.certificate = "My Certificate" - self.intermediates = "My Intermediates" - self.private_key = "My Private Key" - self.private_key_passphrase = "My Private Key Passphrase" - - super(TestLocalCert, self).setUp() - - def test_local_cert(self): - # Create a cert - cert = local_cert_manager.Cert( - certificate=self.certificate, - intermediates=self.intermediates, - private_key=self.private_key, - private_key_passphrase=self.private_key_passphrase - ) - - # Validate the cert functions - self.assertEqual(self.certificate, cert.get_certificate()) - self.assertEqual(self.intermediates, cert.get_intermediates()) - self.assertEqual(self.private_key, cert.get_private_key()) - self.assertEqual(self.private_key_passphrase, - cert.get_private_key_passphrase()) - - -class TestLocalManager(base.BaseTestCase): - - def setUp(self): - self.certificate = "My Certificate" - self.intermediates = "My Intermediates" - self.private_key = "My Private Key" - self.private_key_passphrase = "My Private Key Passphrase" - - def _mock_isfile(path): - _, ext = os.path.splitext(path) - if self.intermediates is None and ext == '.int': - return False - if self.private_key_passphrase is None and ext == '.pass': - return False - return True - isfile_patcher = mock.patch('os.path.isfile') - self.mock_isfile = isfile_patcher.start() - self.addCleanup(isfile_patcher.stop) - self.mock_isfile.side_effect = _mock_isfile - - conf = oslo_fixture.Config(cfg.CONF) - conf.config(group="certificates", storage_path="/tmp/") - - super(TestLocalManager, self).setUp() - - def _open_calls(self, cert_id, mode='w'): - open_calls = [] - unexpected_calls = [] - for ext in ['crt', 'key', 'int', 'pass']: - args = [os.path.join('/tmp/{0}.{1}'.format(cert_id, ext))] - if mode: - args.append(mode) - - call = mock.call(*args) - if ext == 'int' and not self.intermediates: - unexpected_calls.append(call) - elif ext == 'pass' and not self.private_key_passphrase: - unexpected_calls.append(call) - else: - open_calls.append(call) - return open_calls, unexpected_calls - - def _write_calls(self): - write_calls = [ - mock.call(self.certificate), - mock.call(self.private_key), - ] - if self.intermediates: - write_calls.append(mock.call(self.intermediates)) - if self.private_key_passphrase: - write_calls.append(mock.call(self.private_key_passphrase)) - return write_calls - - def _store_cert(self): - file_mock = mock.mock_open() - # Attempt to store the cert - with mock.patch('six.moves.builtins.open', file_mock, create=True): - cert_id = local_cert_manager.CertManager.store_cert( - certificate=self.certificate, - intermediates=self.intermediates, - private_key=self.private_key, - private_key_passphrase=self.private_key_passphrase - ) - - # Check that something came back - self.assertIsNotNone(cert_id) - - # Verify the correct files were opened - open_calls, unexpected_calls = self._open_calls(cert_id) - file_mock.assert_has_calls(open_calls, any_order=True) - for unexpected_call in unexpected_calls: - self.assertNotIn(unexpected_call, file_mock.mock_calls) - - # Verify the writes were made - file_mock().write.assert_has_calls(self._write_calls(), any_order=True) - - return cert_id - - def _get_cert(self, cert_id): 
- file_mock = mock.mock_open() - # Attempt to retrieve the cert - with mock.patch('six.moves.builtins.open', file_mock, create=True): - data = local_cert_manager.CertManager.get_cert(cert_id) - - # Verify the correct files were opened - open_calls, unexpected_calls = self._open_calls(cert_id, 'r') - file_mock.assert_has_calls(open_calls, any_order=True) - for unexpected_call in unexpected_calls: - self.assertNotIn(unexpected_call, file_mock.mock_calls) - - # The returned data should be a Cert object - self.assertIsInstance(data, cert_manager.Cert) - - return data - - def _get_cert_with_fail(self, cert_id, failed='crt'): - def fake_open(path, mode): - if path == os.path.join('/tmp/{0}.{1}'.format(cert_id, failed)): - raise IOError() - return mock.DEFAULT - - file_mock = mock.mock_open() - file_mock.side_effect = fake_open - # Attempt to retrieve the cert - with mock.patch('six.moves.builtins.open', file_mock, create=True): - self.assertRaises( - exception.CertificateStorageException, - local_cert_manager.CertManager.get_cert, - cert_id - ) - - def _delete_cert(self, cert_id): - remove_mock = mock.Mock() - # Delete the cert - with mock.patch('os.remove', remove_mock): - local_cert_manager.CertManager.delete_cert(cert_id) - - open_calls, unexpected_calls = self._open_calls(cert_id, mode=None) - # Verify the correct files were removed - remove_mock.assert_has_calls(open_calls, any_order=True) - for unexpected_call in unexpected_calls: - self.assertNotIn(unexpected_call, remove_mock.mock_calls) - - def _delete_cert_with_fail(self, cert_id): - remove_mock = mock.Mock() - remove_mock.side_effect = IOError - # Delete the cert - with mock.patch('os.remove', remove_mock): - self.assertRaises( - exception.CertificateStorageException, - local_cert_manager.CertManager.delete_cert, - cert_id - ) - - def test_store_cert(self): - self._store_cert() - - @mock.patch('six.moves.builtins.open', create=True) - def test_store_cert_with_io_error(self, file_mock): - file_mock.side_effect = IOError - - self.assertRaises( - exception.CertificateStorageException, - local_cert_manager.CertManager.store_cert, - certificate=self.certificate, - intermediates=self.intermediates, - private_key=self.private_key, - private_key_passphrase=self.private_key_passphrase - ) - - def test_get_cert(self): - # Store a cert - cert_id = self._store_cert() - - # Get the cert - self._get_cert(cert_id) - - def test_get_cert_with_loading_cert_fail(self): - # Store a cert - cert_id = self._store_cert() - - self._get_cert_with_fail(cert_id, failed='crt') - - def test_get_cert_with_loading_private_key_fail(self): - # Store a cert - cert_id = self._store_cert() - - self._get_cert_with_fail(cert_id, failed='key') - - def test_get_cert_with_loading_intermediates_fail(self): - # Store a cert - cert_id = self._store_cert() - - self._get_cert_with_fail(cert_id, failed='int') - - def test_get_cert_with_loading_pkp_fail(self): - # Store a cert - cert_id = self._store_cert() - - self._get_cert_with_fail(cert_id, failed='pass') - - def test_get_cert_without_intermediate(self): - self.intermediates = None - # Store a cert - cert_id = self._store_cert() - - # Get the cert - self._get_cert(cert_id) - - def test_get_cert_without_pkp(self): - self.private_key_passphrase = None - # Store a cert - cert_id = self._store_cert() - - # Get the cert - self._get_cert(cert_id) - - def test_delete_cert(self): - # Store a cert - cert_id = self._store_cert() - - # Verify the cert exists - self._get_cert(cert_id) - - # Delete the cert - self._delete_cert(cert_id) - - 
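
Nearly every helper in this local cert-manager suite leans on mock.mock_open() to verify which files were opened and what was written, without touching the filesystem. The technique in isolation (save_pair is a hypothetical writer; on Python 3 the patch target is builtins.open rather than six.moves.builtins.open)::

    import unittest
    from unittest import mock


    def save_pair(cert_id, certificate, private_key):
        """Write a certificate and key under /tmp/<cert_id>.{crt,key}."""
        with open('/tmp/%s.crt' % cert_id, 'w') as f:
            f.write(certificate)
        with open('/tmp/%s.key' % cert_id, 'w') as f:
            f.write(private_key)


    class TestSavePair(unittest.TestCase):
        def test_writes_both_files(self):
            file_mock = mock.mock_open()
            with mock.patch('builtins.open', file_mock):
                save_pair('abc', 'CERT', 'KEY')
            # Both paths were opened for writing...
            file_mock.assert_has_calls(
                [mock.call('/tmp/abc.crt', 'w'),
                 mock.call('/tmp/abc.key', 'w')],
                any_order=True)
            # ...and both payloads were written to the shared handle.
            file_mock().write.assert_has_calls(
                [mock.call('CERT'), mock.call('KEY')], any_order=True)


    if __name__ == '__main__':
        unittest.main()
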
def test_delete_cert_with_fail(self): - # Store a cert - cert_id = self._store_cert() - - # Verify the cert exists - self._get_cert(cert_id) - - # Delete the cert with fail - self._delete_cert_with_fail(cert_id) - - def test_delete_cert_without_intermediate(self): - self.intermediates = None - # Store a cert - cert_id = self._store_cert() - - # Delete the cert with fail - self._delete_cert_with_fail(cert_id) - - def test_delete_cert_without_pkp(self): - self.private_key_passphrase = None - # Store a cert - cert_id = self._store_cert() - - # Delete the cert with fail - self._delete_cert_with_fail(cert_id) diff --git a/magnum/tests/unit/common/cert_manager/test_x509keypair_cert_manager.py b/magnum/tests/unit/common/cert_manager/test_x509keypair_cert_manager.py deleted file mode 100644 index c83af8de..00000000 --- a/magnum/tests/unit/common/cert_manager/test_x509keypair_cert_manager.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2016 Intel, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import mock - -from magnum.common.cert_manager import x509keypair_cert_manager as x509_cm -from magnum.common import context -from magnum.tests import base -from magnum.tests.unit.db import base as db_base -from magnum.tests.unit.db import utils - - -class TestX509keypairCert(base.BaseTestCase): - - def setUp(self): - self.certificate = "My Certificate" - self.intermediates = "My Intermediates" - self.private_key = "My Private Key" - self.private_key_passphrase = "My Private Key Passphrase" - - super(TestX509keypairCert, self).setUp() - - def test_x509keypair_cert(self): - # Create a cert - cert = x509_cm.Cert( - certificate=self.certificate, - intermediates=self.intermediates, - private_key=self.private_key, - private_key_passphrase=self.private_key_passphrase - ) - - # Validate the cert functions - self.assertEqual(self.certificate, cert.get_certificate()) - self.assertEqual(self.intermediates, cert.get_intermediates()) - self.assertEqual(self.private_key, cert.get_private_key()) - self.assertEqual(self.private_key_passphrase, - cert.get_private_key_passphrase()) - - -class TestX509keypairManager(db_base.DbTestCase): - - def setUp(self): - self.certificate = "My Certificate" - self.intermediates = "My Intermediates" - self.private_key = "My Private Key" - self.private_key_passphrase = "My Private Key Passphrase" - self.context = context.make_admin_context() - super(TestX509keypairManager, self).setUp() - - def test_store_cert(self): - x509keypair = utils.get_test_x509keypair() - with mock.patch.object(self.dbapi, 'create_x509keypair', - autospec=True) as mock_create_x509keypair: - mock_create_x509keypair.return_value = x509keypair - - uuid = x509_cm.CertManager.store_cert(context=self.context, - **x509keypair) - self.assertEqual(uuid, '72625085-c507-4410-9b28-cd7cf1fbf1ad') - - def test_get_cert(self): - x509keypair = utils.get_test_x509keypair(uuid='fake-uuid') - with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', - autospec=True) as mock_get_x509keypair: - mock_get_x509keypair.return_value = 
x509keypair - cert_obj = x509_cm.CertManager.get_cert('fake-uuid', - context=self.context) - self.assertEqual(cert_obj.certificate, 'certificate') - self.assertEqual(cert_obj.private_key, 'private_key') - self.assertEqual(cert_obj.private_key_passphrase, - 'private_key_passphrase') - self.assertEqual(cert_obj.intermediates, 'intermediates') - mock_get_x509keypair.assert_called_once_with(self.context, - 'fake-uuid') - - def test_delete_cert(self): - x509keypair = utils.get_test_x509keypair(uuid='fake-uuid') - with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', - autospec=True) as mock_get_x509keypair: - mock_get_x509keypair.return_value = x509keypair - with mock.patch.object(self.dbapi, 'destroy_x509keypair', - autospec=True) as mock_destroy_x509keypair: - x509_cm.CertManager.delete_cert('fake-uuid', - context=self.context) - mock_get_x509keypair.assert_called_once_with(self.context, - 'fake-uuid') - mock_destroy_x509keypair.assert_called_once_with('fake-uuid') diff --git a/magnum/tests/unit/common/test_clients.py b/magnum/tests/unit/common/test_clients.py deleted file mode 100644 index eaa3b6f8..00000000 --- a/magnum/tests/unit/common/test_clients.py +++ /dev/null @@ -1,359 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from barbicanclient import client as barbicanclient -from glanceclient import client as glanceclient -from heatclient import client as heatclient -import mock -from neutronclient.v2_0 import client as neutronclient -from novaclient import client as novaclient - -from magnum.common import clients -from magnum.common import exception -import magnum.conf -from magnum.tests import base - -CONF = magnum.conf.CONF - - -class ClientsTest(base.BaseTestCase): - - def setUp(self): - super(ClientsTest, self).setUp() - - CONF.set_override('auth_uri', 'http://server.test:5000/v2.0', - group='keystone_authtoken') - - @mock.patch.object(clients.OpenStackClients, 'keystone') - def test_url_for(self, mock_keystone): - obj = clients.OpenStackClients(None) - obj.url_for(service_type='fake_service', interface='fake_endpoint') - - mock_endpoint = mock_keystone.return_value.session.get_endpoint - mock_endpoint.assert_called_once_with(service_type='fake_service', - interface='fake_endpoint') - - @mock.patch.object(clients.OpenStackClients, 'keystone') - def test_magnum_url(self, mock_keystone): - fake_region = 'fake_region' - fake_endpoint = 'fake_endpoint' - CONF.set_override('region_name', fake_region, - group='magnum_client') - CONF.set_override('endpoint_type', fake_endpoint, - group='magnum_client') - obj = clients.OpenStackClients(None) - obj.magnum_url() - - mock_endpoint = mock_keystone.return_value.session.get_endpoint - mock_endpoint.assert_called_once_with(region_name=fake_region, - service_type='container-infra', - interface=fake_endpoint) - - @mock.patch.object(heatclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'url_for') - @mock.patch.object(clients.OpenStackClients, 'auth_url') - def _test_clients_heat(self, expected_region_name, mock_auth, 
mock_url, - mock_call): - mock_auth.__get__ = mock.Mock(return_value="keystone_url") - con = mock.MagicMock() - con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" - con.auth_url = "keystone_url" - mock_url.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._heat = None - obj.heat() - mock_call.assert_called_once_with( - CONF.heat_client.api_version, - endpoint='url_from_keystone', username=None, - cert_file=None, token='3bcc3d3a03f44e3d8377f9247b0ad155', - auth_url='keystone_url', ca_file=None, key_file=None, - password=None, insecure=False) - mock_url.assert_called_once_with(service_type='orchestration', - interface='publicURL', - region_name=expected_region_name) - - def test_clients_heat(self): - self._test_clients_heat(None) - - def test_clients_heat_region(self): - CONF.set_override('region_name', 'myregion', group='heat_client') - self._test_clients_heat('myregion') - - def test_clients_heat_noauth(self): - con = mock.MagicMock() - con.auth_token = None - con.auth_token_info = None - con.trust_id = None - auth_url = mock.PropertyMock(name="auth_url", - return_value="keystone_url") - type(con).auth_url = auth_url - con.get_url_for = mock.Mock(name="get_url_for") - con.get_url_for.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._heat = None - self.assertRaises(exception.AuthorizationFailure, obj.heat) - - @mock.patch.object(clients.OpenStackClients, 'url_for') - @mock.patch.object(clients.OpenStackClients, 'auth_url') - def test_clients_heat_cached(self, mock_auth, mock_url): - mock_auth.__get__ = mock.Mock(return_value="keystone_url") - con = mock.MagicMock() - con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" - con.auth_url = "keystone_url" - mock_url.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._heat = None - heat = obj.heat() - heat_cached = obj.heat() - self.assertEqual(heat, heat_cached) - - @mock.patch.object(glanceclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'url_for') - @mock.patch.object(clients.OpenStackClients, 'auth_url') - def _test_clients_glance(self, expected_region_name, mock_auth, mock_url, - mock_call): - mock_auth.__get__ = mock.Mock(return_value="keystone_url") - con = mock.MagicMock() - con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" - con.auth_url = "keystone_url" - mock_url.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._glance = None - obj.glance() - mock_call.assert_called_once_with( - CONF.glance_client.api_version, - endpoint='url_from_keystone', username=None, - token='3bcc3d3a03f44e3d8377f9247b0ad155', - auth_url='keystone_url', - password=None, cacert=None, cert=None, key=None, insecure=False) - mock_url.assert_called_once_with(service_type='image', - interface='publicURL', - region_name=expected_region_name) - - def test_clients_glance(self): - self._test_clients_glance(None) - - def test_clients_glance_region(self): - CONF.set_override('region_name', 'myregion', group='glance_client') - self._test_clients_glance('myregion') - - def test_clients_glance_noauth(self): - con = mock.MagicMock() - con.auth_token = None - con.auth_token_info = None - con.trust_id = None - auth_url = mock.PropertyMock(name="auth_url", - return_value="keystone_url") - type(con).auth_url = auth_url - con.get_url_for = mock.Mock(name="get_url_for") - con.get_url_for.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._glance = None - self.assertRaises(exception.AuthorizationFailure, obj.glance) - - 
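
The *_cached tests in this file all assert the same property: calling an accessor such as obj.heat() twice returns the identical client object. Behind that is a memoizing accessor that stores the built client on the instance; a bare-bones sketch (Clients and its object() payload are stand-ins for OpenStackClients and a real heatclient)::

    import unittest


    class Clients(object):
        """Stand-in for an OpenStackClients-style lazy client holder."""

        def __init__(self):
            self._heat = None

        def heat(self):
            if self._heat is None:       # build once, reuse on later calls
                self._heat = object()    # stand-in for heatclient.Client()
            return self._heat


    class TestCachedClient(unittest.TestCase):
        def test_second_call_returns_cached_client(self):
            obj = Clients()
            self.assertIs(obj.heat(), obj.heat())


    if __name__ == '__main__':
        unittest.main()
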
@mock.patch.object(clients.OpenStackClients, 'url_for') - @mock.patch.object(clients.OpenStackClients, 'auth_url') - def test_clients_glance_cached(self, mock_auth, mock_url): - mock_auth.__get__ = mock.Mock(return_value="keystone_url") - con = mock.MagicMock() - con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" - con.auth_url = "keystone_url" - mock_url.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._glance = None - glance = obj.glance() - glance_cached = obj.glance() - self.assertEqual(glance, glance_cached) - - @mock.patch.object(clients.OpenStackClients, 'keystone') - @mock.patch.object(barbicanclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'url_for') - def _test_clients_barbican(self, expected_region_name, mock_url, - mock_call, mock_keystone): - con = mock.MagicMock() - con.auth_url = "keystone_url" - mock_url.return_value = "url_from_keystone" - keystone = mock.MagicMock() - keystone.session = mock.MagicMock() - mock_keystone.return_value = keystone - obj = clients.OpenStackClients(con) - obj._barbican = None - obj.barbican() - mock_call.assert_called_once_with( - endpoint='url_from_keystone', - session=keystone.session) - - mock_keystone.assert_called_once_with() - mock_url.assert_called_once_with(service_type='key-manager', - interface='publicURL', - region_name=expected_region_name) - - def test_clients_barbican(self): - self._test_clients_barbican(None) - - def test_clients_barbican_region(self): - CONF.set_override('region_name', 'myregion', - group='barbican_client') - self._test_clients_barbican('myregion') - - def test_clients_barbican_noauth(self): - con = mock.MagicMock() - con.auth_token = None - con.auth_token_info = None - con.trust_id = None - auth_url = mock.PropertyMock(name="auth_url", - return_value="keystone_url") - type(con).auth_url = auth_url - con.get_url_for = mock.Mock(name="get_url_for") - con.get_url_for.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._barbican = None - self.assertRaises(exception.AuthorizationFailure, obj.barbican) - - @mock.patch.object(clients.OpenStackClients, 'keystone') - @mock.patch.object(clients.OpenStackClients, 'url_for') - def test_clients_barbican_cached(self, mock_url, mock_keystone): - con = mock.MagicMock() - con.auth_url = "keystone_url" - mock_url.return_value = "url_from_keystone" - keystone = mock.MagicMock() - keystone.session = mock.MagicMock() - mock_keystone.return_value = keystone - obj = clients.OpenStackClients(con) - obj._barbican = None - barbican = obj.barbican() - barbican_cached = obj.barbican() - self.assertEqual(barbican, barbican_cached) - - @mock.patch.object(novaclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'keystone') - @mock.patch.object(clients.OpenStackClients, 'url_for') - @mock.patch.object(clients.OpenStackClients, 'auth_url') - def _test_clients_nova(self, expected_region_name, mock_auth, mock_url, - mock_keystone, mock_call): - mock_auth.__get__ = mock.Mock(return_value="keystone_url") - con = mock.MagicMock() - keystone = mock.MagicMock() - keystone.session = mock.MagicMock() - mock_keystone.return_value = keystone - con.auth_url = "keystone_url" - mock_url.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._nova = None - obj.nova() - expected_kwargs = {'session': keystone.session, - 'endpoint_override': mock_url.return_value, - 'cacert': None, - 'insecure': False} - mock_call.assert_called_once_with(CONF.nova_client.api_version, - **expected_kwargs) - 
mock_url.assert_called_once_with(service_type='compute', - interface='publicURL', - region_name=expected_region_name) - - def test_clients_nova(self): - self._test_clients_nova(None) - - def test_clients_nova_region(self): - CONF.set_override('region_name', 'myregion', group='nova_client') - self._test_clients_nova('myregion') - - def test_clients_nova_noauth(self): - con = mock.MagicMock() - con.auth_token = None - con.auth_token_info = None - con.trust_id = None - auth_url = mock.PropertyMock(name="auth_url", - return_value="keystone_url") - type(con).auth_url = auth_url - con.get_url_for = mock.Mock(name="get_url_for") - con.get_url_for.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._nova = None - self.assertRaises(exception.AuthorizationFailure, obj.nova) - - @mock.patch.object(clients.OpenStackClients, 'url_for') - @mock.patch.object(clients.OpenStackClients, 'auth_url') - def test_clients_nova_cached(self, mock_auth, mock_url): - mock_auth.__get__ = mock.Mock(return_value="keystone_url") - con = mock.MagicMock() - con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" - con.auth_token_info = "auth-token-info" - con.auth_url = "keystone_url" - mock_url.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._nova = None - nova = obj.nova() - nova_cached = obj.nova() - self.assertEqual(nova, nova_cached) - - @mock.patch.object(neutronclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'url_for') - @mock.patch.object(clients.OpenStackClients, 'auth_url') - def _test_clients_neutron(self, expected_region_name, mock_auth, mock_url, - mock_call): - fake_endpoint_type = 'fake_endpoint_type' - CONF.set_override('endpoint_type', fake_endpoint_type, - group='neutron_client') - mock_auth.__get__ = mock.Mock(return_value="keystone_url") - con = mock.MagicMock() - con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" - con.auth_url = "keystone_url" - mock_url.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._neutron = None - obj.neutron() - mock_call.assert_called_once_with( - endpoint_url='url_from_keystone', - endpoint_type=fake_endpoint_type, - auth_url='keystone_url', - token='3bcc3d3a03f44e3d8377f9247b0ad155', - ca_cert=None, insecure=False) - mock_url.assert_called_once_with(service_type='network', - interface=fake_endpoint_type, - region_name=expected_region_name) - - def test_clients_neutron(self): - self._test_clients_neutron(None) - - def test_clients_neutron_region(self): - CONF.set_override('region_name', 'myregion', - group='neutron_client') - self._test_clients_neutron('myregion') - - def test_clients_neutron_noauth(self): - con = mock.MagicMock() - con.auth_token = None - con.auth_token_info = None - con.trust_id = None - auth_url = mock.PropertyMock(name="auth_url", - return_value="keystone_url") - type(con).auth_url = auth_url - con.get_url_for = mock.Mock(name="get_url_for") - con.get_url_for.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - obj._neutron = None - self.assertRaises(exception.AuthorizationFailure, obj.neutron) - - @mock.patch.object(clients.OpenStackClients, 'url_for') - @mock.patch.object(clients.OpenStackClients, 'auth_url') - def test_clients_neutron_cached(self, mock_auth, mock_url): - mock_auth.__get__ = mock.Mock(return_value="keystone_url") - con = mock.MagicMock() - con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155" - con.auth_url = "keystone_url" - mock_url.return_value = "url_from_keystone" - obj = clients.OpenStackClients(con) - 
obj._neutron = None - neutron = obj.neutron() - neutron_cached = obj.neutron() - self.assertEqual(neutron, neutron_cached) diff --git a/magnum/tests/unit/common/test_context.py b/magnum/tests/unit/common/test_context.py deleted file mode 100644 index 1e207dae..00000000 --- a/magnum/tests/unit/common/test_context.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.common import context as magnum_context -from magnum.tests import base - - -class ContextTestCase(base.TestCase): - - def _create_context(self, roles=None): - return magnum_context.RequestContext(auth_token='auth_token1', - auth_url='auth_url1', - domain_id='domain_id1', - domain_name='domain_name1', - user_name='user1', - user_id='user-id1', - project_name='tenant1', - project_id='tenant-id1', - roles=roles, - is_admin=True, - read_only=True, - show_deleted=True, - request_id='request_id1', - trust_id='trust_id1', - auth_token_info='token_info1') - - def test_context(self): - ctx = self._create_context() - - self.assertEqual("auth_token1", ctx.auth_token) - self.assertEqual("auth_url1", ctx.auth_url) - self.assertEqual("domain_id1", ctx.domain_id) - self.assertEqual("domain_name1", ctx.domain_name) - self.assertEqual("user1", ctx.user_name) - self.assertEqual("user-id1", ctx.user_id) - self.assertEqual("tenant1", ctx.project_name) - self.assertEqual("tenant-id1", ctx.project_id) - self.assertEqual([], ctx.roles) - self.assertTrue(ctx.is_admin) - self.assertTrue(ctx.read_only) - self.assertTrue(ctx.show_deleted) - self.assertEqual("request_id1", ctx.request_id) - self.assertEqual("trust_id1", ctx.trust_id) - self.assertEqual("token_info1", ctx.auth_token_info) - - def test_context_with_roles(self): - ctx = self._create_context(roles=['admin', 'service']) - - self.assertEqual("auth_token1", ctx.auth_token) - self.assertEqual("auth_url1", ctx.auth_url) - self.assertEqual("domain_id1", ctx.domain_id) - self.assertEqual("domain_name1", ctx.domain_name) - self.assertEqual("user1", ctx.user_name) - self.assertEqual("user-id1", ctx.user_id) - self.assertEqual("tenant1", ctx.project_name) - self.assertEqual("tenant-id1", ctx.project_id) - for role in ctx.roles: - self.assertIn(role, ['admin', 'service']) - self.assertTrue(ctx.is_admin) - self.assertTrue(ctx.read_only) - self.assertTrue(ctx.show_deleted) - self.assertEqual("request_id1", ctx.request_id) - self.assertEqual("trust_id1", ctx.trust_id) - self.assertEqual("token_info1", ctx.auth_token_info) - - def test_to_dict_from_dict(self): - ctx = self._create_context() - ctx2 = magnum_context.RequestContext.from_dict(ctx.to_dict()) - - self.assertEqual(ctx.auth_token, ctx2.auth_token) - self.assertEqual(ctx.auth_url, ctx2.auth_url) - self.assertEqual(ctx.domain_id, ctx2.domain_id) - self.assertEqual(ctx.domain_name, ctx2.domain_name) - self.assertEqual(ctx.user_name, ctx2.user_name) - self.assertEqual(ctx.user_id, ctx2.user_id) - self.assertEqual(ctx.tenant, ctx2.tenant) - self.assertEqual(ctx.project_name, 
ctx2.project_name) - self.assertEqual(ctx.project_id, ctx2.project_id) - self.assertEqual(ctx.is_admin, ctx2.is_admin) - self.assertEqual(ctx.read_only, ctx2.read_only) - self.assertEqual(ctx.roles, ctx2.roles) - self.assertEqual(ctx.show_deleted, ctx2.show_deleted) - self.assertEqual(ctx.request_id, ctx2.request_id) - self.assertEqual(ctx.trust_id, ctx2.trust_id) - self.assertEqual(ctx.auth_token_info, ctx2.auth_token_info) - - def test_request_context_sets_is_admin(self): - ctxt = magnum_context.make_admin_context() - self.assertTrue(ctxt.is_admin) diff --git a/magnum/tests/unit/common/test_docker_utils.py b/magnum/tests/unit/common/test_docker_utils.py deleted file mode 100644 index 8933957f..00000000 --- a/magnum/tests/unit/common/test_docker_utils.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import docker -import mock - -from magnum.common import docker_utils -import magnum.conf -from magnum.tests import base - - -CONF = magnum.conf.CONF - - -class TestDockerUtils(base.BaseTestCase): - - def test_is_docker_api_version_atleast(self): - - def fake_version(): - return {'ApiVersion': '1.18'} - - docker_client = mock.MagicMock() - docker_client.version.side_effect = fake_version - res = docker_utils.is_docker_api_version_atleast(docker_client, '1.21') - self.assertFalse(res) - - -class DockerClientTestCase(base.BaseTestCase): - def test_docker_client_init(self): - client = docker_utils.DockerHTTPClient() - - self.assertEqual(CONF.docker.docker_remote_api_version, - client.api_version) - self.assertEqual(CONF.docker.default_timeout, - client.timeout) - - def test_docker_client_init_timeout(self): - expected_timeout = 300 - client = docker_utils.DockerHTTPClient(timeout=expected_timeout) - - self.assertEqual(CONF.docker.docker_remote_api_version, - client.api_version) - self.assertEqual(expected_timeout, client.timeout) - - def test_docker_client_init_url(self): - expected_url = 'http://127.0.0.1:2375' - client = docker_utils.DockerHTTPClient(url=expected_url) - - self.assertEqual(expected_url, - client.base_url) - self.assertEqual(CONF.docker.docker_remote_api_version, - client.api_version) - self.assertEqual(CONF.docker.default_timeout, - client.timeout) - - def test_docker_client_init_version(self): - expected_version = '1.16' - client = docker_utils.DockerHTTPClient(ver=expected_version) - - self.assertEqual(expected_version, - client.api_version) - self.assertEqual(CONF.docker.default_timeout, - client.timeout) - - @mock.patch.object(docker.APIClient, 'inspect_container') - @mock.patch.object(docker.APIClient, 'containers') - def test_list_instances(self, mock_containers, mock_inspect): - client = docker_utils.DockerHTTPClient() - - containers = [dict(Id=x) for x in range(0, 3)] - inspect_results = [dict(Config=dict(Hostname=x)) for x in range(0, 3)] - - mock_containers.return_value = containers - mock_inspect.side_effect = inspect_results - - instances = client.list_instances() - - self.assertEqual([0, 
1, 2], instances) - mock_containers.assert_called_once_with(all=True) - mock_inspect.assert_has_calls([mock.call(x) for x in range(0, 3)]) - - @mock.patch.object(docker.APIClient, 'inspect_container') - @mock.patch.object(docker.APIClient, 'containers') - def test_list_instances_inspect(self, mock_containers, mock_inspect): - client = docker_utils.DockerHTTPClient() - - containers = [dict(Id=x) for x in range(0, 3)] - inspect_results = [dict(Config=dict(Hostname=x)) for x in range(0, 3)] - - mock_containers.return_value = containers - mock_inspect.side_effect = inspect_results - - instances = client.list_instances(inspect=True) - - self.assertEqual(inspect_results, instances) - mock_containers.assert_called_once_with(all=True) - mock_inspect.assert_has_calls([mock.call(x) for x in range(0, 3)]) diff --git a/magnum/tests/unit/common/test_exception.py b/magnum/tests/unit/common/test_exception.py deleted file mode 100644 index 13c70cfa..00000000 --- a/magnum/tests/unit/common/test_exception.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect - -from magnum.common import exception -from magnum.i18n import _ -from magnum.tests import base - - -class TestMagnumException(exception.MagnumException): - message = _("templated %(name)s") - - -class TestException(base.BaseTestCase): - - def raise_(self, ex): - raise ex - - def test_message_is_templated(self): - ex = TestMagnumException(name="NAME") - self.assertEqual("templated NAME", ex.message) - - def test_custom_message_is_templated(self): - ex = TestMagnumException(_("custom templated %(name)s"), name="NAME") - self.assertEqual("custom templated NAME", ex.message) - - def test_all_exceptions(self): - for name, obj in inspect.getmembers(exception): - if inspect.isclass(obj) and issubclass(obj, Exception): - self.assertRaises(obj, self.raise_, obj()) diff --git a/magnum/tests/unit/common/test_keystone.py b/magnum/tests/unit/common/test_keystone.py deleted file mode 100644 index 449db0cd..00000000 --- a/magnum/tests/unit/common/test_keystone.py +++ /dev/null @@ -1,242 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
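
TestException above depends on a small but central convention: MagnumException subclasses declare a %-style message template that is formatted from keyword arguments at construction time. A minimal reimplementation of just that convention (TemplatedError is illustrative and much smaller than the real MagnumException)::

    import unittest


    class TemplatedError(Exception):
        """Exception whose class-level message is a %-style template."""

        message = 'an unknown error occurred'

        def __init__(self, message=None, **kwargs):
            if message is None:
                message = self.message
            # Substitute keyword arguments into the template, if any.
            self.message = message % kwargs if kwargs else message
            super(TemplatedError, self).__init__(self.message)


    class TemplatedNameError(TemplatedError):
        message = 'templated %(name)s'


    class TestTemplatedError(unittest.TestCase):
        def test_message_is_templated(self):
            self.assertEqual('templated NAME',
                             TemplatedNameError(name='NAME').message)

        def test_custom_message_is_templated(self):
            ex = TemplatedNameError('custom templated %(name)s', name='NAME')
            self.assertEqual('custom templated NAME', ex.message)


    if __name__ == '__main__':
        unittest.main()
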
- -import mock -from oslo_config import fixture - -from keystoneauth1 import exceptions as ka_exception -from keystoneauth1 import identity as ka_identity -import keystoneclient.exceptions as kc_exception - -from magnum.common import exception -from magnum.common import keystone -import magnum.conf -from magnum.conf import keystone as ksconf -from magnum.tests import base -from magnum.tests import utils - -CONF = magnum.conf.CONF - - -@mock.patch('keystoneclient.v3.client.Client') -class KeystoneClientTest(base.TestCase): - - def setUp(self): - super(KeystoneClientTest, self).setUp() - dummy_url = 'http://server.test:5000/v3' - - self.ctx = utils.dummy_context() - self.ctx.auth_url = dummy_url - self.ctx.auth_token = 'abcd1234' - - plugin = keystone.ka_loading.get_plugin_loader('password') - opts = keystone.ka_loading.get_auth_plugin_conf_options(plugin) - cfg_fixture = self.useFixture(fixture.Config()) - cfg_fixture.register_opts(opts, group=ksconf.CFG_GROUP) - self.config(auth_type='password', - auth_url=dummy_url, - username='fake_user', - password='fake_pass', - project_name='fake_project', - group=ksconf.CFG_GROUP) - - self.config(auth_uri=dummy_url, - admin_user='magnum', - admin_password='varybadpass', - admin_tenant_name='service', - group=ksconf.CFG_LEGACY_GROUP) - - # Disable global mocking for trustee_domain_id - self.stop_global( - 'magnum.common.keystone.KeystoneClientV3.trustee_domain_id') - - def tearDown(self): - # Re-enable global mocking for trustee_domain_id. We need this because - # mock blows up when trying to stop an already stopped patch (which it - # will do due to the addCleanup() in base.TestCase). - self.start_global( - 'magnum.common.keystone.KeystoneClientV3.trustee_domain_id') - - super(KeystoneClientTest, self).tearDown() - - def test_client_with_password(self, mock_ks): - self.ctx.is_admin = True - self.ctx.auth_token_info = None - self.ctx.auth_token = None - self.ctx.trust_id = None - ks_client = keystone.KeystoneClientV3(self.ctx) - ks_client.client - session = ks_client.session - auth_plugin = session.auth - mock_ks.assert_called_once_with(session=session, trust_id=None) - self.assertIsInstance(auth_plugin, ka_identity.Password) - - @mock.patch('magnum.common.keystone.ka_loading') - @mock.patch('magnum.common.keystone.ka_v3') - def test_client_with_password_legacy(self, mock_v3, mock_loading, mock_ks): - self.ctx.is_admin = True - self.ctx.auth_token_info = None - self.ctx.auth_token = None - self.ctx.trust_id = None - mock_loading.load_auth_from_conf_options.side_effect = \ - ka_exception.MissingRequiredOptions(mock.MagicMock()) - ks_client = keystone.KeystoneClientV3(self.ctx) - ks_client.client - session = ks_client.session - self.assertWarnsRegex(Warning, - '[keystone_authtoken] section is deprecated') - mock_v3.Password.assert_called_once_with( - auth_url='http://server.test:5000/v3', password='varybadpass', - project_domain_id='default', project_name='service', - user_domain_id='default', username='magnum') - mock_ks.assert_called_once_with(session=session, trust_id=None) - - @mock.patch('magnum.common.keystone.ka_access') - def test_client_with_access_info(self, mock_access, mock_ks): - self.ctx.auth_token_info = mock.MagicMock() - ks_client = keystone.KeystoneClientV3(self.ctx) - ks_client.client - session = ks_client.session - auth_plugin = session.auth - mock_access.create.assert_called_once_with(body=mock.ANY, - auth_token='abcd1234') - mock_ks.assert_called_once_with(session=session, trust_id=None) - self.assertIsInstance(auth_plugin, 
ka_identity.access.AccessInfoPlugin) - - @mock.patch('magnum.common.keystone.ka_v3') - def test_client_with_token(self, mock_v3, mock_ks): - ks_client = keystone.KeystoneClientV3(self.ctx) - ks_client.client - session = ks_client.session - mock_v3.Token.assert_called_once_with( - auth_url='http://server.test:5000/v3', token='abcd1234') - mock_ks.assert_called_once_with(session=session, trust_id=None) - - def test_client_with_no_credentials(self, mock_ks): - self.ctx.auth_token = None - ks_client = keystone.KeystoneClientV3(self.ctx) - self.assertRaises(exception.AuthorizationFailure, - ks_client._get_auth) - mock_ks.assert_not_called() - - def test_delete_trust(self, mock_ks): - mock_ks.return_value.trusts.delete.return_value = None - ks_client = keystone.KeystoneClientV3(self.ctx) - cluster = mock.MagicMock() - cluster.trust_id = 'atrust123' - self.assertIsNone(ks_client.delete_trust(self.ctx, cluster)) - mock_ks.return_value.trusts.delete.assert_called_once_with('atrust123') - - def test_delete_trust_not_found(self, mock_ks): - mock_delete = mock_ks.return_value.trusts.delete - mock_delete.side_effect = kc_exception.NotFound() - ks_client = keystone.KeystoneClientV3(self.ctx) - cluster = mock.MagicMock() - cluster.trust_id = 'atrust123' - self.assertIsNone(ks_client.delete_trust(self.ctx, cluster)) - - @mock.patch('keystoneauth1.session.Session') - def test_create_trust_with_all_roles(self, mock_session, mock_ks): - mock_session.return_value.get_user_id.return_value = '123456' - mock_session.return_value.get_project_id.return_value = '654321' - - self.ctx.roles = ['role1', 'role2'] - ks_client = keystone.KeystoneClientV3(self.ctx) - - ks_client.create_trust(trustee_user='888888') - - mock_ks.return_value.trusts.create.assert_called_once_with( - delegation_depth=0, - trustor_user='123456', project='654321', - trustee_user='888888', role_names=['role1', 'role2'], - impersonation=True) - - @mock.patch('keystoneauth1.session.Session') - def test_create_trust_with_limit_roles(self, mock_session, mock_ks): - mock_session.return_value.get_user_id.return_value = '123456' - mock_session.return_value.get_project_id.return_value = '654321' - - self.ctx.roles = ['role1', 'role2'] - ks_client = keystone.KeystoneClientV3(self.ctx) - - CONF.set_override('roles', ['role3'], group='trust') - ks_client.create_trust(trustee_user='888888') - - mock_ks.return_value.trusts.create.assert_called_once_with( - delegation_depth=0, - trustor_user='123456', project='654321', - trustee_user='888888', role_names=['role3'], - impersonation=True) - - @mock.patch('magnum.common.keystone.KeystoneClientV3.trustee_domain_id') - def test_create_trustee(self, mock_tdi, mock_ks): - expected_username = '_username' - expected_password = '_password' - expected_domain = '_expected_trustee_domain_id' - mock_tdi.__get__ = mock.MagicMock(return_value=expected_domain) - - ks_client = keystone.KeystoneClientV3(self.ctx) - ks_client.create_trustee( - username=expected_username, - password=expected_password, - ) - mock_ks.return_value.users.create.assert_called_once_with( - name=expected_username, - password=expected_password, - domain=expected_domain, - ) - - @mock.patch('magnum.common.keystone.KeystoneClientV3.domain_admin_auth') - @mock.patch('magnum.common.keystone.KeystoneClientV3.domain_admin_session') - def test_trustee_domain_id(self, mock_session, mock_auth, mock_ks): - expected_domain_id = '_expected_domain_id' - _mock_session = mock.MagicMock() - mock_session.__get__ = mock.MagicMock(return_value=_mock_session) - _mock_auth = 
mock.MagicMock() - mock_auth.__get__ = mock.MagicMock(return_value=_mock_auth) - mock_access = mock.MagicMock() - mock_access.domain_id = expected_domain_id - _mock_auth.get_access.return_value = mock_access - - ks_client = keystone.KeystoneClientV3(self.ctx) - self.assertEqual(expected_domain_id, ks_client.trustee_domain_id) - - _mock_auth.get_access.assert_called_once_with( - _mock_session - ) - - def test_get_validate_region_name(self, mock_ks): - key = 'region_name' - val = 'RegionOne' - CONF.set_override(key, val, 'cinder_client') - mock_region = mock.MagicMock() - mock_region.id = 'RegionOne' - mock_ks.return_value.regions.list.return_value = [mock_region] - ks_client = keystone.KeystoneClientV3(self.ctx) - region_name = ks_client.get_validate_region_name(val) - self.assertEqual('RegionOne', region_name) - - def test_get_validate_region_name_not_found(self, mock_ks): - key = 'region_name' - val = 'region123' - CONF.set_override(key, val, 'cinder_client') - ks_client = keystone.KeystoneClientV3(self.ctx) - self.assertRaises(exception.InvalidParameterValue, - ks_client.get_validate_region_name, val) - - def test_get_validate_region_name_is_None(self, mock_ks): - key = 'region_name' - val = None - CONF.set_override(key, val, 'cinder_client') - ks_client = keystone.KeystoneClientV3(self.ctx) - self.assertRaises(exception.InvalidParameterValue, - ks_client.get_validate_region_name, val) diff --git a/magnum/tests/unit/common/test_policy.py b/magnum/tests/unit/common/test_policy.py deleted file mode 100644 index d4456ed6..00000000 --- a/magnum/tests/unit/common/test_policy.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2017 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_policy import policy as oslo_policy - -from magnum.common import context as magnum_context -from magnum.common import policy - -from magnum.tests import base - - -class TestPolicy(base.TestCase): - - def setUp(self): - super(TestPolicy, self).setUp() - rules_dict = {"context_is_admin": "role:admin"} - self.rules = oslo_policy.Rules.from_dict(rules_dict) - - def test_check_is_admin_with_admin_context_succeeds(self): - ctx = magnum_context.RequestContext(user='test-user', - project_id='test-project-id', - is_admin=True) - # explicitly set admin role as this test checks for admin role - # with the policy engine - ctx.roles = ['admin'] - self.assertTrue(policy.check_is_admin(ctx)) - - def test_check_is_admin_with_user_context_fails(self): - ctx = magnum_context.RequestContext(user='test-user', - project_id='test-project-id') - # there is no admin role set in the context, so check_is_admin - # should return False - self.assertFalse(policy.check_is_admin(ctx)) diff --git a/magnum/tests/unit/common/test_profiler.py b/magnum/tests/unit/common/test_profiler.py deleted file mode 100644 index 5fded8d0..00000000 --- a/magnum/tests/unit/common/test_profiler.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2017 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect -import mock - -from oslo_config import cfg -from oslo_utils import importutils -from osprofiler import initializer as profiler_init -from osprofiler import opts as profiler_opts -import six.moves as six - -from magnum.common import profiler -from magnum import conf -from magnum.tests import base - - -class TestProfiler(base.TestCase): - def test_all_public_methods_are_traced(self): - profiler_opts.set_defaults(conf.CONF) - self.config(enabled=True, - group='profiler') - - classes = [ - 'magnum.conductor.api.API', - 'magnum.conductor.api.ListenerAPI', - 'magnum.conductor.handlers.ca_conductor.Handler', - 'magnum.conductor.handlers.cluster_conductor.Handler', - 'magnum.conductor.handlers.conductor_listener.Handler', - 'magnum.conductor.handlers.indirection_api.Handler', - 'magnum.service.periodic.MagnumPeriodicTasks', - ] - for clsname in classes: - # give the metaclass and trace_cls() decorator a chance to patch - # methods of the classes above - six.reload_module( - importutils.import_module(clsname.rsplit('.', 1)[0])) - cls = importutils.import_class(clsname) - - for attr, obj in cls.__dict__.items(): - # only public methods are traced - if attr.startswith('_'): - continue - # only checks callables - if not (inspect.ismethod(obj) or inspect.isfunction(obj)): - continue - # osprofiler skips static methods - if isinstance(obj, staticmethod): - continue - - self.assertTrue(getattr(obj, '__traced__', False), obj) - - @mock.patch.object(profiler_init, 'init_from_conf') - def test_setup_profiler(self, mock_init): - self.config(enabled=True, - group='profiler') - - profiler.setup('foo', 'localhost') - - mock_init.assert_called_once_with(conf=conf.CONF, - context=mock.ANY, - project="magnum", - service='foo', - host='localhost') - - @mock.patch.object(profiler_init, 'init_from_conf') - @mock.patch.object(conf, 'CONF', new=cfg.ConfigOpts()) - def test_setup_profiler_without_osprofiler(self, mock_init): - profiler.setup('foo', 'localhost') - self.assertEqual(False, mock_init.called) diff --git a/magnum/tests/unit/common/test_rpc.py b/magnum/tests/unit/common/test_rpc.py deleted file mode 100644 index 7414f265..00000000 --- a/magnum/tests/unit/common/test_rpc.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright 2017 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
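For context on the test_profiler.py removal above: its key assertion is that every public method of the listed conductor classes carries the __traced__ marker that osprofiler's trace_cls decorator sets. A minimal self-contained sketch of that detection loop, with a toy traced decorator standing in for osprofiler (the Handler class and method names here are illustrative only, not magnum code):

import inspect

def traced(func):
    # Toy stand-in for an osprofiler-style wrapper: tag the callable so
    # it can be detected later, the way the deleted test detects it.
    func.__traced__ = True
    return func

class Handler(object):  # illustrative class, not a magnum conductor
    @traced
    def create_cluster(self):
        pass

    def _private_helper(self):  # private: tracing is not required
        pass

for attr, obj in Handler.__dict__.items():
    if attr.startswith('_'):
        continue  # only public methods must be traced
    if not (inspect.ismethod(obj) or inspect.isfunction(obj)):
        continue  # only check callables
    assert getattr(obj, '__traced__', False), attr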
- -import mock -import oslo_messaging as messaging -from oslo_messaging.rpc import dispatcher -from oslo_serialization import jsonutils - -from magnum.common import context -from magnum.common import rpc -from magnum.tests import base - - -class TestRpc(base.TestCase): - @mock.patch.object(rpc, 'profiler', None) - @mock.patch.object(rpc, 'RequestContextSerializer') - @mock.patch.object(messaging, 'RPCClient') - def test_get_client(self, mock_client, mock_ser): - rpc.TRANSPORT = mock.Mock() - tgt = mock.Mock() - ser = mock.Mock() - mock_client.return_value = 'client' - mock_ser.return_value = ser - - client = rpc.get_client(tgt, version_cap='1.0', serializer='foo', - timeout=6969) - - mock_ser.assert_called_once_with('foo') - mock_client.assert_called_once_with(rpc.TRANSPORT, - tgt, version_cap='1.0', - serializer=ser, timeout=6969) - self.assertEqual('client', client) - - @mock.patch.object(rpc, 'profiler', mock.Mock()) - @mock.patch.object(rpc, 'ProfilerRequestContextSerializer') - @mock.patch.object(messaging, 'RPCClient') - def test_get_client_profiler_enabled(self, mock_client, mock_ser): - rpc.TRANSPORT = mock.Mock() - tgt = mock.Mock() - ser = mock.Mock() - mock_client.return_value = 'client' - mock_ser.return_value = ser - - client = rpc.get_client(tgt, version_cap='1.0', serializer='foo', - timeout=6969) - - mock_ser.assert_called_once_with('foo') - mock_client.assert_called_once_with(rpc.TRANSPORT, - tgt, version_cap='1.0', - serializer=ser, timeout=6969) - self.assertEqual('client', client) - - @mock.patch.object(rpc, 'profiler', None) - @mock.patch.object(rpc, 'RequestContextSerializer') - @mock.patch.object(messaging, 'get_rpc_server') - def test_get_server(self, mock_get, mock_ser): - rpc.TRANSPORT = mock.Mock() - ser = mock.Mock() - tgt = mock.Mock() - ends = mock.Mock() - mock_ser.return_value = ser - mock_get.return_value = 'server' - access_policy = dispatcher.DefaultRPCAccessPolicy - server = rpc.get_server(tgt, ends, serializer='foo') - - mock_ser.assert_called_once_with('foo') - mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends, - executor='eventlet', serializer=ser, - access_policy=access_policy) - self.assertEqual('server', server) - - @mock.patch.object(rpc, 'profiler', mock.Mock()) - @mock.patch.object(rpc, 'ProfilerRequestContextSerializer') - @mock.patch.object(messaging, 'get_rpc_server') - def test_get_server_profiler_enabled(self, mock_get, mock_ser): - rpc.TRANSPORT = mock.Mock() - ser = mock.Mock() - tgt = mock.Mock() - ends = mock.Mock() - mock_ser.return_value = ser - mock_get.return_value = 'server' - access_policy = dispatcher.DefaultRPCAccessPolicy - server = rpc.get_server(tgt, ends, serializer='foo') - - mock_ser.assert_called_once_with('foo') - mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends, - executor='eventlet', serializer=ser, - access_policy=access_policy) - self.assertEqual('server', server) - - @mock.patch.object(messaging, 'TransportURL') - def test_get_transport_url(self, mock_url): - conf = mock.Mock() - rpc.CONF = conf - mock_url.parse.return_value = 'foo' - - url = rpc.get_transport_url(url_str='bar') - - self.assertEqual('foo', url) - mock_url.parse.assert_called_once_with(conf, 'bar') - - @mock.patch.object(messaging, 'TransportURL') - def test_get_transport_url_null(self, mock_url): - conf = mock.Mock() - rpc.CONF = conf - mock_url.parse.return_value = 'foo' - - url = rpc.get_transport_url() - - self.assertEqual('foo', url) - mock_url.parse.assert_called_once_with(conf, None) - - def test_cleanup_transport_null(self): 
- rpc.TRANSPORT = None - rpc.NOTIFIER = mock.Mock() - self.assertRaises(AssertionError, rpc.cleanup) - - def test_cleanup_notifier_null(self): - rpc.TRANSPORT = mock.Mock() - rpc.NOTIFIER = None - self.assertRaises(AssertionError, rpc.cleanup) - - def test_cleanup(self): - rpc.NOTIFIER = mock.Mock() - rpc.TRANSPORT = mock.Mock() - trans_cleanup = mock.Mock() - rpc.TRANSPORT.cleanup = trans_cleanup - - rpc.cleanup() - - trans_cleanup.assert_called_once_with() - self.assertIsNone(rpc.TRANSPORT) - self.assertIsNone(rpc.NOTIFIER) - - def test_add_extra_exmods(self): - rpc.EXTRA_EXMODS = [] - - rpc.add_extra_exmods('foo', 'bar') - - self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS) - - def test_clear_extra_exmods(self): - rpc.EXTRA_EXMODS = ['foo', 'bar'] - - rpc.clear_extra_exmods() - - self.assertEqual(0, len(rpc.EXTRA_EXMODS)) - - def test_serialize_entity(self): - with mock.patch.object(jsonutils, 'to_primitive') as mock_prim: - rpc.JsonPayloadSerializer.serialize_entity('context', 'entity') - - mock_prim.assert_called_once_with('entity', convert_instances=True) - - -class TestRequestContextSerializer(base.TestCase): - def setUp(self): - super(TestRequestContextSerializer, self).setUp() - self.mock_base = mock.Mock() - self.ser = rpc.RequestContextSerializer(self.mock_base) - self.ser_null = rpc.RequestContextSerializer(None) - - def test_serialize_entity(self): - self.mock_base.serialize_entity.return_value = 'foo' - - ser_ent = self.ser.serialize_entity('context', 'entity') - - self.mock_base.serialize_entity.assert_called_once_with('context', - 'entity') - self.assertEqual('foo', ser_ent) - - def test_serialize_entity_null_base(self): - ser_ent = self.ser_null.serialize_entity('context', 'entity') - - self.assertEqual('entity', ser_ent) - - def test_deserialize_entity(self): - self.mock_base.deserialize_entity.return_value = 'foo' - - deser_ent = self.ser.deserialize_entity('context', 'entity') - - self.mock_base.deserialize_entity.assert_called_once_with('context', - 'entity') - self.assertEqual('foo', deser_ent) - - def test_deserialize_entity_null_base(self): - deser_ent = self.ser_null.deserialize_entity('context', 'entity') - - self.assertEqual('entity', deser_ent) - - def test_serialize_context(self): - context = mock.Mock() - - self.ser.serialize_context(context) - - context.to_dict.assert_called_once_with() - - @mock.patch.object(context, 'RequestContext') - def test_deserialize_context(self, mock_req): - self.ser.deserialize_context('context') - - mock_req.from_dict.assert_called_once_with('context') - - -class TestProfilerRequestContextSerializer(base.TestCase): - def setUp(self): - super(TestProfilerRequestContextSerializer, self).setUp() - self.ser = rpc.ProfilerRequestContextSerializer(mock.Mock()) - - @mock.patch('magnum.common.rpc.profiler') - def test_serialize_context(self, mock_profiler): - prof = mock_profiler.get.return_value - prof.hmac_key = 'swordfish' - prof.get_base_id.return_value = 'baseid' - prof.get_id.return_value = 'parentid' - - context = mock.Mock() - context.to_dict.return_value = {'project_id': 'test'} - - self.assertEqual({ - 'project_id': 'test', - 'trace_info': { - 'hmac_key': 'swordfish', - 'base_id': 'baseid', - 'parent_id': 'parentid' - } - }, self.ser.serialize_context(context)) - - @mock.patch('magnum.common.rpc.profiler') - def test_deserialize_context(self, mock_profiler): - serialized = {'project_id': 'test', - 'trace_info': { - 'hmac_key': 'swordfish', - 'base_id': 'baseid', - 'parent_id': 'parentid'}} - - context = 
self.ser.deserialize_context(serialized) - - self.assertEqual('test', context.project_id) - mock_profiler.init.assert_called_once_with( - hmac_key='swordfish', base_id='baseid', parent_id='parentid') diff --git a/magnum/tests/unit/common/test_service.py b/magnum/tests/unit/common/test_service.py deleted file mode 100644 index 463e0904..00000000 --- a/magnum/tests/unit/common/test_service.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) 2016 OpenStack Foundation -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from oslo_log import log as logging - -from magnum.common import service -from magnum.tests import base - - -class TestMagnumService(base.BaseTestCase): - - @mock.patch.object(logging, 'register_options') - @mock.patch.object(logging, 'setup') - @mock.patch('magnum.common.config.set_config_defaults') - @mock.patch('magnum.common.config.parse_args') - def test_prepare_service_with_argv_not_none(self, mock_parse, mock_set, - mock_setup, mock_reg): - argv = 'foo' - mock_parse.side_effect = lambda *args, **kwargs: None - - service.prepare_service(argv) - - mock_parse.assert_called_once_with(argv) - mock_setup.assert_called_once_with(base.CONF, 'magnum') - mock_reg.assert_called_once_with(base.CONF) - mock_set.assert_called_once_with() - - @mock.patch.object(logging, 'register_options') - @mock.patch.object(logging, 'setup') - @mock.patch('magnum.common.config.set_config_defaults') - @mock.patch('magnum.common.config.parse_args') - def test_prepare_service_with_argv_none(self, mock_parse, mock_set, - mock_setup, mock_reg): - argv = None - mock_parse.side_effect = lambda *args, **kwargs: None - - service.prepare_service(argv) - - mock_parse.assert_called_once_with([]) - mock_setup.assert_called_once_with(base.CONF, 'magnum') - mock_reg.assert_called_once_with(base.CONF) - mock_set.assert_called_once_with() diff --git a/magnum/tests/unit/common/test_short_id.py b/magnum/tests/unit/common/test_short_id.py deleted file mode 100644 index 3e64321a..00000000 --- a/magnum/tests/unit/common/test_short_id.py +++ /dev/null @@ -1,73 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
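The TestRequestContextSerializer cases deleted above pin down a simple delegation pattern: wrap an optional base serializer and fall back to identity when none is given. A runnable sketch of that pattern (simplified from magnum.common.rpc; the real class also handles context serialization):

class RequestContextSerializer(object):
    def __init__(self, base=None):
        self._base = base

    def serialize_entity(self, context, entity):
        if not self._base:
            return entity  # null base: entities pass through unchanged
        return self._base.serialize_entity(context, entity)

    def deserialize_entity(self, context, entity):
        if not self._base:
            return entity
        return self._base.deserialize_entity(context, entity)

ser = RequestContextSerializer(None)
assert ser.serialize_entity('context', 'entity') == 'entity'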
- -import uuid - -import testtools - -from magnum.common import short_id - - -class ShortIdTest(testtools.TestCase): - - def test_byte_string_8(self): - self.assertEqual('\xab', short_id._to_byte_string(0xab, 8)) - self.assertEqual('\x05', short_id._to_byte_string(0x05, 8)) - - def test_byte_string_16(self): - self.assertEqual('\xab\xcd', short_id._to_byte_string(0xabcd, 16)) - self.assertEqual('\x0a\xbc', short_id._to_byte_string(0xabc, 16)) - - def test_byte_string_12(self): - self.assertEqual('\xab\xc0', short_id._to_byte_string(0xabc, 12)) - self.assertEqual('\x0a\xb0', short_id._to_byte_string(0x0ab, 12)) - - def test_byte_string_60(self): - val = 0x111111111111111 - byte_string = short_id._to_byte_string(val, 60) - self.assertEqual('\x11\x11\x11\x11\x11\x11\x11\x10', byte_string) - - def test_get_id_string(self): - id = short_id.get_id('11111111-1111-4111-bfff-ffffffffffff') - self.assertEqual('ceirceirceir', id) - - def test_get_id_uuid_1(self): - source = uuid.UUID('11111111-1111-4111-bfff-ffffffffffff') - self.assertEqual(0x111111111111111, source.time) - self.assertEqual('ceirceirceir', short_id.get_id(source)) - - def test_get_id_uuid_f(self): - source = uuid.UUID('ffffffff-ffff-4fff-8000-000000000000') - self.assertEqual('777777777777', short_id.get_id(source)) - - def test_get_id_uuid_0(self): - source = uuid.UUID('00000000-0000-4000-bfff-ffffffffffff') - self.assertEqual('aaaaaaaaaaaa', short_id.get_id(source)) - - def test_get_id_uuid_endianness(self): - source = uuid.UUID('ffffffff-00ff-4000-aaaa-aaaaaaaaaaaa') - self.assertEqual('aaaa77777777', short_id.get_id(source)) - - def test_get_id_uuid1(self): - source = uuid.uuid1() - self.assertRaises(ValueError, short_id.get_id, source) - - def test_generate_ids(self): - allowed_chars = 'abcdefghijklmnopqrstuvwxyz234567' - ids = [short_id.generate_id() for i in range(25)] - - for id in ids: - self.assertEqual(12, len(id)) - s = ''.join(ch for ch in id if ch not in allowed_chars) - self.assertEqual(s, '') - self.assertEqual(1, ids.count(id)) diff --git a/magnum/tests/unit/common/test_urlfetch.py b/magnum/tests/unit/common/test_urlfetch.py deleted file mode 100644 index b65351df..00000000 --- a/magnum/tests/unit/common/test_urlfetch.py +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
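The short_id tests deleted above encode the 60 "timestamp" bits of a version-4 UUID (which are random for uuid4) as a 12-character base32 string. A self-contained sketch that reproduces the 'ceirceirceir' expectation from those tests (an approximation of magnum.common.short_id, assuming the same bit layout):

import base64
import uuid

def _to_byte_string(value, num_bits):
    # Left-align value in whole bytes, padding the tail with zero bits,
    # matching the _to_byte_string expectations in the deleted tests.
    shifts = range(num_bits - 8, -8, -8)
    return bytes(((value >> shift) if shift >= 0 else (value << -shift)) & 0xff
                 for shift in shifts)

def get_id(source):
    # 12 base32 characters derived from the UUID's 60 timestamp bits.
    if isinstance(source, str):
        source = uuid.UUID(source)
    if source.version != 4:
        raise ValueError('Version 4 UUID required')
    return base64.b32encode(
        _to_byte_string(source.time, 60))[:12].decode('ascii').lower()

assert get_id('11111111-1111-4111-bfff-ffffffffffff') == 'ceirceirceir'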
- -import mock -from mock import patch -from oslo_config import cfg - -from magnum.common import urlfetch -from magnum.tests import base - - -class TestUrlFetch(base.BaseTestCase): - - def test_get_unsupported_scheme(self): - self.assertRaises(urlfetch.URLFetchError, - urlfetch.get, - 'https://example.com', - ('http')) - - @patch('requests.get') - def test_get(self, - mock_request_get): - mock_reader = mock.MagicMock() - mock_reader.__iter__.return_value = ['a', 'b', 'c'] - mock_response = mock.MagicMock() - mock_response.iter_content.return_value = mock_reader - mock_request_get.return_value = mock_response - - self.assertEqual('abc', urlfetch.get('http://example.com')) - - @patch('requests.get') - def test_get_exceed_manifest_size(self, - mock_request_get): - cfg.CONF.set_override("max_manifest_size", 1) - - mock_reader = mock.MagicMock() - mock_reader.__iter__.return_value = ['a', 'b'] - mock_response = mock.MagicMock() - mock_response.iter_content.return_value = mock_reader - mock_request_get.return_value = mock_response - - self.assertRaises(urlfetch.URLFetchError, - urlfetch.get, - 'http://example.com') diff --git a/magnum/tests/unit/common/test_utils.py b/magnum/tests/unit/common/test_utils.py deleted file mode 100644 index a864360f..00000000 --- a/magnum/tests/unit/common/test_utils.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
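The test_get_exceed_manifest_size case deleted above shows urlfetch.get accumulating response chunks and failing once a configured maximum is exceeded. A minimal sketch of that guard, with a hypothetical bounded_read helper standing in for the real requests-based implementation:

class URLFetchError(Exception):
    pass

def bounded_read(chunks, max_size):
    # Accumulate streamed chunks, failing as soon as the payload grows
    # past max_size, as the deleted test expects of urlfetch.get.
    result = ''
    for chunk in chunks:
        result += chunk
        if len(result) > max_size:
            raise URLFetchError('Manifest exceeds maximum allowed size')
    return result

assert bounded_read(['a', 'b', 'c'], max_size=10) == 'abc'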
- -import errno -import os -import os.path -import shutil -import tempfile - -import mock -from oslo_concurrency import processutils -from oslo_utils import netutils - -from magnum.common import exception -from magnum.common import utils -from magnum.tests import base - - -class UtilsTestCase(base.TestCase): - - def test_get_k8s_quantity(self): - self.assertEqual(1024000.0, utils.get_k8s_quantity('1000Ki')) - self.assertEqual(0.001, utils.get_k8s_quantity('1E-3')) - self.assertEqual(0.5, utils.get_k8s_quantity('0.0005k')) - self.assertEqual(0.5, utils.get_k8s_quantity('500m')) - self.assertEqual(1300000.0, utils.get_k8s_quantity('1.3E+6')) - self.assertEqual(1300000.0, utils.get_k8s_quantity('1.3E6')) - self.assertRaises(exception.UnsupportedK8sQuantityFormat, - utils.get_k8s_quantity, '1E1E') - - def test_get_docker_quantity(self): - self.assertEqual(512, utils.get_docker_quantity('512')) - self.assertEqual(512, utils.get_docker_quantity('512b')) - self.assertEqual(512 * 1024, utils.get_docker_quantity('512k')) - self.assertEqual(512 * 1024 * 1024, utils.get_docker_quantity('512m')) - self.assertEqual(512 * 1024 * 1024 * 1024, - utils.get_docker_quantity('512g')) - self.assertRaises(exception.UnsupportedDockerQuantityFormat, - utils.get_docker_quantity, '512bb') - self.assertRaises(exception.UnsupportedDockerQuantityFormat, - utils.get_docker_quantity, '512B') - - -class ExecuteTestCase(base.TestCase): - - def test_retry_on_failure(self): - fd, tmpfilename = tempfile.mkstemp() - _, tmpfilename2 = tempfile.mkstemp() - try: - fp = os.fdopen(fd, 'w+') - fp.write('''#!/bin/sh -# If stdin fails to get passed during one of the runs, make a note. -if ! grep -q foo -then - echo 'failure' > "$1" -fi -# If stdin has failed to get passed during this or a previous run, exit early. -if grep failure "$1" -then - exit 1 -fi -runs="$(cat $1)" -if [ -z "$runs" ] -then - runs=0 -fi -runs=$(($runs + 1)) -echo $runs > "$1" -exit 1 -''') - fp.close() - os.chmod(tmpfilename, 0o755) - try: - self.assertRaises(processutils.ProcessExecutionError, - utils.execute, - tmpfilename, tmpfilename2, attempts=10, - process_input=b'foo', - delay_on_retry=False) - except OSError as e: - if e.errno == errno.EACCES: - self.skipTest("Permissions error detected. " - "Are you running with a noexec /tmp?") - else: - raise - with open(tmpfilename2, 'r') as fp: - runs = fp.read() - self.assertNotEqual(runs.strip(), 'failure', 'stdin did not ' - 'always get passed ' - 'correctly') - runs = int(runs.strip()) - self.assertEqual(10, runs, - 'Ran %d times instead of 10.' % runs) - finally: - os.unlink(tmpfilename) - os.unlink(tmpfilename2) - - def test_unknown_kwargs_raises_error(self): - self.assertRaises(processutils.UnknownArgumentError, - utils.execute, - '/usr/bin/env', 'true', - this_is_not_a_valid_kwarg=True) - - def test_check_exit_code_boolean(self): - utils.execute('/usr/bin/env', 'false', check_exit_code=False) - self.assertRaises(processutils.ProcessExecutionError, - utils.execute, - '/usr/bin/env', 'false', check_exit_code=True) - - def test_no_retry_on_success(self): - fd, tmpfilename = tempfile.mkstemp() - _, tmpfilename2 = tempfile.mkstemp() - try: - fp = os.fdopen(fd, 'w+') - fp.write('''#!/bin/sh -# If we've already run, bail out. -grep -q foo "$1" && exit 1 -# Mark that we've run before. -echo foo > "$1" -# Check that stdin gets passed correctly. 
-grep foo -''') - fp.close() - os.chmod(tmpfilename, 0o755) - try: - utils.execute(tmpfilename, - tmpfilename2, - process_input=b'foo', - attempts=2) - except OSError as e: - if e.errno == errno.EACCES: - self.skipTest("Permissions error detected. " - "Are you running with a noexec /tmp?") - else: - raise - finally: - os.unlink(tmpfilename) - os.unlink(tmpfilename2) - - @mock.patch.object(processutils, 'execute') - @mock.patch.object(os.environ, 'copy', return_value={}) - def test_execute_use_standard_locale_no_env_variables(self, env_mock, - execute_mock): - utils.execute('foo', use_standard_locale=True) - execute_mock.assert_called_once_with('foo', - env_variables={'LC_ALL': 'C'}) - - @mock.patch.object(processutils, 'execute') - def test_execute_use_standard_locale_with_env_variables(self, - execute_mock): - utils.execute('foo', use_standard_locale=True, - env_variables={'foo': 'bar'}) - execute_mock.assert_called_once_with('foo', - env_variables={'LC_ALL': 'C', - 'foo': 'bar'}) - - @mock.patch.object(processutils, 'execute') - def test_execute_not_use_standard_locale(self, execute_mock): - utils.execute('foo', use_standard_locale=False, - env_variables={'foo': 'bar'}) - execute_mock.assert_called_once_with('foo', - env_variables={'foo': 'bar'}) - - def test_execute_get_root_helper(self): - with mock.patch.object(processutils, 'execute') as execute_mock: - helper = utils._get_root_helper() - utils.execute('foo', run_as_root=True) - execute_mock.assert_called_once_with('foo', run_as_root=True, - root_helper=helper) - - def test_execute_without_root_helper(self): - with mock.patch.object(processutils, 'execute') as execute_mock: - utils.execute('foo', run_as_root=False) - execute_mock.assert_called_once_with('foo', run_as_root=False) - - def test_validate_and_normalize_mac(self): - mac = 'AA:BB:CC:DD:EE:FF' - with mock.patch.object(netutils, 'is_valid_mac') as m_mock: - m_mock.return_value = True - self.assertEqual(mac.lower(), - utils.validate_and_normalize_mac(mac)) - - def test_validate_and_normalize_mac_invalid_format(self): - with mock.patch.object(netutils, 'is_valid_mac') as m_mock: - m_mock.return_value = False - self.assertRaises(exception.InvalidMAC, - utils.validate_and_normalize_mac, 'invalid-mac') - - def test_safe_rstrip(self): - value = '/test/' - rstripped_value = '/test' - not_rstripped = '/' - - self.assertEqual(rstripped_value, utils.safe_rstrip(value, '/')) - self.assertEqual(not_rstripped, utils.safe_rstrip(not_rstripped, '/')) - - def test_safe_rstrip_not_raises_exceptions(self): - # Supplying an integer should normally raise an exception because it - # does not have the rstrip() method. - value = 10 - - # In the case of raising an exception safe_rstrip() should return the - # original value.
- self.assertEqual(value, utils.safe_rstrip(value)) - - -class TempFilesTestCase(base.TestCase): - - def test_tempdir(self): - - dirname = None - with utils.tempdir() as tempdir: - self.assertTrue(os.path.isdir(tempdir)) - dirname = tempdir - self.assertFalse(os.path.exists(dirname)) - - @mock.patch.object(shutil, 'rmtree') - @mock.patch.object(tempfile, 'mkdtemp') - def test_tempdir_mocked(self, mkdtemp_mock, rmtree_mock): - - self.config(tempdir='abc') - mkdtemp_mock.return_value = 'temp-dir' - kwargs = {'a': 'b'} - - with utils.tempdir(**kwargs) as tempdir: - self.assertEqual('temp-dir', tempdir) - tempdir_created = tempdir - - mkdtemp_mock.assert_called_once_with(**kwargs) - rmtree_mock.assert_called_once_with(tempdir_created) - - @mock.patch.object(utils, 'LOG') - @mock.patch.object(shutil, 'rmtree') - @mock.patch.object(tempfile, 'mkdtemp') - def test_tempdir_mocked_error_on_rmtree(self, mkdtemp_mock, rmtree_mock, - log_mock): - - self.config(tempdir='abc') - mkdtemp_mock.return_value = 'temp-dir' - rmtree_mock.side_effect = OSError - - with utils.tempdir() as tempdir: - self.assertEqual('temp-dir', tempdir) - tempdir_created = tempdir - - rmtree_mock.assert_called_once_with(tempdir_created) - self.assertTrue(log_mock.error.called) - - -class GeneratePasswordTestCase(base.TestCase): - def test_generate_password(self): - password = utils.generate_password(length=12) - self.assertTrue([c for c in password if c in '0123456789']) - self.assertTrue([c for c in password - if c in 'abcdefghijklmnopqrstuvwxyz']) - self.assertTrue([c for c in password - if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']) diff --git a/magnum/tests/unit/common/x509/__init__.py b/magnum/tests/unit/common/x509/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/common/x509/test_operations.py b/magnum/tests/unit/common/x509/test_operations.py deleted file mode 100644 index b6e1ce0e..00000000 --- a/magnum/tests/unit/common/x509/test_operations.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2015 Rackspace, inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
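The TempFilesTestCase cases deleted above describe a tempdir() context manager that always removes the directory it created and, per the error-path test, logs rmtree failures rather than raising them. A minimal sketch under those assumptions:

import contextlib
import os.path
import shutil
import tempfile

@contextlib.contextmanager
def tempdir(**kwargs):
    # Create a temporary directory, hand it to the caller, and always
    # try to remove it afterwards.
    dirname = tempfile.mkdtemp(**kwargs)
    try:
        yield dirname
    finally:
        try:
            shutil.rmtree(dirname)
        except OSError:
            pass  # the real helper logs this via LOG.error

with tempdir() as d:
    assert os.path.isdir(d)
assert not os.path.exists(d)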
- -from cryptography.hazmat.primitives import serialization -import mock - -from magnum.common.x509 import operations -from magnum.tests import base - - -class TestX509Operations(base.BaseTestCase): - def setUp(self): - super(TestX509Operations, self).setUp() - - @mock.patch.object(serialization, 'NoEncryption') - @mock.patch.object(operations, 'default_backend') - @mock.patch.object(operations, '_load_pem_private_key') - def test_decrypt_key(self, mock_load_pem_private_key, - mock_default_backend, mock_no_encryption_class): - mock_private_key = mock.MagicMock() - mock_load_pem_private_key.return_value = mock_private_key - mock_private_key.private_bytes.return_value = mock.sentinel.decrypted - - actual_decrypted = operations.decrypt_key(mock.sentinel.key, - mock.sentinel.passphrase) - - mock_load_pem_private_key.assert_called_once_with( - mock.sentinel.key, mock.sentinel.passphrase) - mock_private_key.private_bytes.assert_called_once_with( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.PKCS8, - encryption_algorithm=mock_no_encryption_class.return_value - ) - self.assertEqual(mock.sentinel.decrypted, actual_decrypted) diff --git a/magnum/tests/unit/common/x509/test_sign.py b/magnum/tests/unit/common/x509/test_sign.py deleted file mode 100644 index 7771c90b..00000000 --- a/magnum/tests/unit/common/x509/test_sign.py +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
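The test_decrypt_key case deleted above asserts that operations.decrypt_key loads an encrypted PEM private key and re-serializes it as unencrypted PKCS8. A runnable round-trip sketch using the cryptography package (assumes a modern cryptography release where the backend argument of load_pem_private_key is optional):

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

def decrypt_key(encrypted_pem, passphrase):
    # Load the encrypted key, then re-serialize it without encryption.
    key = serialization.load_pem_private_key(encrypted_pem, passphrase)
    return key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    )

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
encrypted = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.BestAvailableEncryption(b'secret'),
)
assert b'ENCRYPTED' not in decrypt_key(encrypted, b'secret')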
- -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.asymmetric import rsa -from cryptography.hazmat.primitives import hashes -from cryptography.hazmat.primitives import serialization -from cryptography import x509 as c_x509 -from cryptography.x509.oid import NameOID -import mock -import six - -from magnum.common import exception -from magnum.common.x509 import operations -from magnum.tests import base - - -class TestX509(base.BaseTestCase): - - def setUp(self): - super(TestX509, self).setUp() - self.issuer_name = six.u("fake-issuer") - self.subject_name = six.u("fake-subject") - self.ca_encryption_password = six.b("fake-ca-password") - self.encryption_password = six.b("fake-password") - - def _load_pems(self, keypairs, encryption_password): - private_key = serialization.load_pem_private_key( - keypairs['private_key'], - password=encryption_password, - backend=default_backend(), - ) - certificate = c_x509.load_pem_x509_certificate( - keypairs['certificate'], default_backend()) - - return certificate, private_key - - def _generate_ca_certificate(self, issuer_name=None): - issuer_name = issuer_name or self.issuer_name - keypairs = operations.generate_ca_certificate( - issuer_name, encryption_password=self.ca_encryption_password) - - return self._load_pems(keypairs, self.ca_encryption_password) - - def _generate_client_certificate(self, issuer_name, subject_name): - ca = operations.generate_ca_certificate( - self.issuer_name, encryption_password=self.ca_encryption_password) - keypairs = operations.generate_client_certificate( - self.issuer_name, - self.subject_name, - ca['private_key'], - encryption_password=self.encryption_password, - ca_key_password=self.ca_encryption_password, - ) - - return self._load_pems(keypairs, self.encryption_password) - - def _public_bytes(self, public_key): - return public_key.public_bytes( - serialization.Encoding.PEM, - serialization.PublicFormat.SubjectPublicKeyInfo - ) - - def _private_bytes(self, private_key): - return private_key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.PKCS8, - encryption_algorithm=serialization.NoEncryption() - ) - - def _generate_private_key(self): - return rsa.generate_private_key( - public_exponent=65537, - key_size=2048, - backend=default_backend() - ) - - def _build_csr(self, private_key): - csr = c_x509.CertificateSigningRequestBuilder() - csr = csr.subject_name(c_x509.Name([ - c_x509.NameAttribute(NameOID.COMMON_NAME, self.subject_name) - ])) - - return csr.sign(private_key, hashes.SHA256(), default_backend()) - - def assertHasPublicKey(self, keypairs): - key = keypairs[1] - cert = keypairs[0] - - self.assertEqual(self._public_bytes(key.public_key()), - self._public_bytes(cert.public_key())) - - def assertHasSubjectName(self, cert, subject_name): - actual_subject_name = cert.subject.get_attributes_for_oid( - c_x509.NameOID.COMMON_NAME) - actual_subject_name = actual_subject_name[0].value - - self.assertEqual(subject_name, actual_subject_name) - - def assertHasIssuerName(self, cert, issuer_name): - actual_issuer_name = cert.issuer.get_attributes_for_oid( - c_x509.NameOID.COMMON_NAME) - actual_issuer_name = actual_issuer_name[0].value - - self.assertEqual(issuer_name, actual_issuer_name) - - def assertInClientExtensions(self, cert): - key_usage = c_x509.KeyUsage(True, False, True, False, False, False, - False, False, False) - key_usage = c_x509.Extension(key_usage.oid, True, key_usage) - extended_key_usage = 
c_x509.ExtendedKeyUsage([c_x509.OID_CLIENT_AUTH]) - extended_key_usage = c_x509.Extension(extended_key_usage.oid, False, - extended_key_usage) - basic_constraints = c_x509.BasicConstraints(ca=False, path_length=None) - basic_constraints = c_x509.Extension(basic_constraints.oid, True, - basic_constraints) - - self.assertIn(key_usage, cert.extensions) - self.assertIn(extended_key_usage, cert.extensions) - self.assertIn(basic_constraints, cert.extensions) - - def test_generate_ca_certificate_with_bytes_issuer_name(self): - issuer_name = six.b("bytes-issuer-name") - cert, _ = self._generate_ca_certificate(issuer_name) - - issuer_name = issuer_name.decode('utf-8') - self.assertHasSubjectName(cert, issuer_name) - self.assertHasIssuerName(cert, issuer_name) - - def test_generate_ca_certificate_has_publickey(self): - keypairs = self._generate_ca_certificate(self.issuer_name) - - self.assertHasPublicKey(keypairs) - - def test_generate_ca_certificate_set_subject_name(self): - cert, _ = self._generate_ca_certificate(self.issuer_name) - - self.assertHasSubjectName(cert, self.issuer_name) - - def test_generate_ca_certificate_set_issuer_name(self): - cert, _ = self._generate_ca_certificate(self.issuer_name) - - self.assertHasIssuerName(cert, self.issuer_name) - - def test_generate_ca_certificate_set_extentions_as_ca(self): - cert, _ = self._generate_ca_certificate(self.issuer_name) - - key_usage = c_x509.KeyUsage(False, False, False, False, False, True, - False, False, False) - key_usage = c_x509.Extension(key_usage.oid, True, key_usage) - basic_constraints = c_x509.BasicConstraints(ca=True, path_length=0) - basic_constraints = c_x509.Extension(basic_constraints.oid, True, - basic_constraints) - - self.assertIn(key_usage, cert.extensions) - self.assertIn(basic_constraints, cert.extensions) - - def test_generate_client_certificate_has_publickey(self): - keypairs = self._generate_client_certificate( - self.issuer_name, self.subject_name) - - self.assertHasPublicKey(keypairs) - - def test_generate_client_certificate_set_subject_name(self): - cert, _ = self._generate_client_certificate( - self.issuer_name, self.subject_name) - - self.assertHasSubjectName(cert, self.subject_name) - - def test_generate_client_certificate_set_issuer_name(self): - cert, key = self._generate_client_certificate( - self.issuer_name, self.subject_name) - - self.assertHasIssuerName(cert, self.issuer_name) - - def test_generate_client_certificate_set_extentions_as_client(self): - cert, key = self._generate_client_certificate( - self.issuer_name, self.subject_name) - - self.assertInClientExtensions(cert) - - def test_load_pem_private_key_with_bytes_private_key(self): - private_key = self._generate_private_key() - private_key = self._private_bytes(private_key) - - self.assertIsInstance(private_key, six.binary_type) - private_key = operations._load_pem_private_key(private_key) - self.assertIsInstance(private_key, rsa.RSAPrivateKey) - - def test_load_pem_private_key_with_unicode_private_key(self): - private_key = self._generate_private_key() - private_key = self._private_bytes(private_key) - private_key = six.text_type(private_key.decode('utf-8')) - - self.assertIsInstance(private_key, six.text_type) - private_key = operations._load_pem_private_key(private_key) - self.assertIsInstance(private_key, rsa.RSAPrivateKey) - - @mock.patch('cryptography.x509.load_pem_x509_csr') - @mock.patch('six.b') - def test_sign_with_unicode_csr(self, mock_six, mock_load_pem): - ca_key = self._generate_private_key() - private_key = 
self._generate_private_key() - csr_obj = self._build_csr(private_key) - csr = csr_obj.public_bytes(serialization.Encoding.PEM) - csr = six.text_type(csr.decode('utf-8')) - - mock_load_pem.return_value = csr_obj - operations.sign(csr, self.issuer_name, ca_key, - skip_validation=True) - mock_six.assert_called_once_with(csr) - - def test_sign_with_invalid_csr(self): - ca_key = self._generate_private_key() - csr = 'test' - csr = six.u(csr) - - self.assertRaises(exception.InvalidCsr, - operations.sign, - csr, self.issuer_name, ca_key, skip_validation=True) diff --git a/magnum/tests/unit/common/x509/test_validator.py b/magnum/tests/unit/common/x509/test_validator.py deleted file mode 100644 index 96618904..00000000 --- a/magnum/tests/unit/common/x509/test_validator.py +++ /dev/null @@ -1,117 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest - -from cryptography import x509 as c_x509 - -from magnum.common.exception import CertificateValidationError -from magnum.common.x509 import validator as v - - -class TestValidators(unittest.TestCase): - def _build_key_usage(self, critical=False): - # Digital Signature and Key Encipherment are enabled - key_usage = c_x509.KeyUsage( - True, False, True, False, False, False, False, False, False) - return c_x509.Extension(key_usage.oid, critical, key_usage) - - def _build_basic_constraints(self, ca=False, critical=False): - bc = c_x509.BasicConstraints(ca, None) - return c_x509.Extension(bc.oid, critical, bc) - - def test_filter_allowed_extensions(self): - key_usage = self._build_key_usage(critical=True) - - actual = [e for e in v.filter_allowed_extensions([key_usage], - ['keyUsage'])] - self.assertEqual([key_usage], actual) - - def test_filter_allowed_extensions_disallowed_but_not_critical(self): - key_usage = self._build_key_usage() - - actual = [e for e in v.filter_allowed_extensions([key_usage], - ['subjectAltName'])] - - self.assertEqual([], actual) - - def test_filter_allowed_extensions_disallowed(self): - key_usage = self._build_key_usage(critical=True) - - with self.assertRaises(CertificateValidationError): - next(v.filter_allowed_extensions([key_usage], ['subjectAltName'])) - - def test_merge_key_usage(self): - key_usage = self._build_key_usage(critical=True) - - self.assertEqual(key_usage, - v._merge_key_usage(key_usage, - ['Digital Signature', - 'Key Encipherment'])) - - def test_merge_key_usage_disallowed_but_not_critical(self): - key_usage = self._build_key_usage() - expected = c_x509.KeyUsage( - True, False, False, False, False, False, False, False, False) - expected = c_x509.Extension(expected.oid, False, expected) - - self.assertEqual(expected, - v._merge_key_usage(key_usage, - ['Digital Signature'])) - - def test_merge_key_usage_disallowed(self): - key_usage = self._build_key_usage(critical=True) - - with self.assertRaises(CertificateValidationError): - v._merge_key_usage(key_usage, ['Digital Signature']) - - def test_disallow_ca_in_basic_constraints_not_critical(self): - bc = self._build_basic_constraints(ca=True) - expected = 
self._build_basic_constraints(ca=False) - - self.assertEqual(expected, v._disallow_ca_in_basic_constraints(bc)) - - def test_disallow_ca_in_basic_constraints(self): - bc = self._build_basic_constraints(ca=True, critical=True) - - with self.assertRaises(CertificateValidationError): - v._disallow_ca_in_basic_constraints(bc) - - def test_disallow_ca_in_basic_constraints_with_non_ca(self): - bc = self._build_basic_constraints(ca=False) - - self.assertEqual(bc, v._disallow_ca_in_basic_constraints(bc)) - - def test_remove_ca_key_usage(self): - contains_ca_key_usage = set([ - "Digital Signature", "Certificate Sign", "CRL Sign"]) - - self.assertEqual(set(["Digital Signature"]), - v._remove_ca_key_usage(contains_ca_key_usage)) - - def test_remove_ca_key_usage_cert_sign(self): - contains_ca_key_usage = set(["Digital Signature", "Certificate Sign"]) - - self.assertEqual(set(["Digital Signature"]), - v._remove_ca_key_usage(contains_ca_key_usage)) - - def test_remove_ca_key_usage_crl_sign(self): - contains_ca_key_usage = set(["Digital Signature", "CRL Sign"]) - - self.assertEqual(set(["Digital Signature"]), - v._remove_ca_key_usage(contains_ca_key_usage)) - - def test_remove_ca_key_usage_without_ca_usage(self): - contains_ca_key_usage = set(["Digital Signature"]) - - self.assertEqual(set(["Digital Signature"]), - v._remove_ca_key_usage(contains_ca_key_usage)) diff --git a/magnum/tests/unit/conductor/__init__.py b/magnum/tests/unit/conductor/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/conductor/handlers/__init__.py b/magnum/tests/unit/conductor/handlers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/conductor/handlers/common/__init__.py b/magnum/tests/unit/conductor/handlers/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/conductor/handlers/common/test_cert_manager.py b/magnum/tests/unit/conductor/handlers/common/test_cert_manager.py deleted file mode 100644 index 9dedd809..00000000 --- a/magnum/tests/unit/conductor/handlers/common/test_cert_manager.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
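The validator tests deleted above pin down three behaviours for certificate extensions: allowed ones pass through, disallowed non-critical ones are silently dropped, and a disallowed critical one is fatal. A simplified sketch of that rule, using plain dicts in place of the cryptography Extension objects the real validator inspects:

class CertificateValidationError(Exception):
    pass

def filter_allowed_extensions(extensions, allowed_names):
    # Allowed extensions pass through; disallowed non-critical ones are
    # dropped; a disallowed critical extension raises.
    for ext in extensions:
        if ext['name'] in allowed_names:
            yield ext
        elif ext['critical']:
            raise CertificateValidationError(
                'Forbidden critical extension: %s' % ext['name'])

exts = [{'name': 'keyUsage', 'critical': True},
        {'name': 'subjectAltName', 'critical': False}]
assert list(filter_allowed_extensions(exts, ['keyUsage'])) == [exts[0]]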
- -import mock - -from magnum.common import exception -from magnum.conductor.handlers.common import cert_manager -from magnum.tests import base - - -class CertManagerTestCase(base.BaseTestCase): - def setUp(self): - super(CertManagerTestCase, self).setUp() - - cert_manager_patcher = mock.patch.object(cert_manager, 'cert_manager') - self.cert_manager = cert_manager_patcher.start() - self.addCleanup(cert_manager_patcher.stop) - - self.cert_manager_backend = mock.MagicMock() - self.cert_manager.get_backend.return_value = self.cert_manager_backend - - self.cert_manager_backend.CertManager = mock.MagicMock() - self.CertManager = self.cert_manager_backend.CertManager - - @mock.patch('magnum.common.x509.operations.generate_ca_certificate') - @mock.patch('magnum.common.short_id.generate_id') - def test_generate_ca_cert(self, mock_generate_id, mock_generate_ca_cert): - expected_ca_name = 'ca-name' - expected_ca_password = 'password' - expected_ca_cert = { - 'private_key': 'private_key', 'certificate': 'certificate'} - expected_ca_cert_ref = 'ca_cert_ref' - - mock_generate_id.return_value = expected_ca_password - mock_generate_ca_cert.return_value = expected_ca_cert - - self.CertManager.store_cert.return_value = expected_ca_cert_ref - self.assertEqual((expected_ca_cert_ref, expected_ca_cert, - expected_ca_password), - cert_manager._generate_ca_cert(expected_ca_name)) - - mock_generate_ca_cert.assert_called_once_with( - expected_ca_name, encryption_password=expected_ca_password) - self.CertManager.store_cert.assert_called_once_with( - certificate=expected_ca_cert['certificate'], - private_key=expected_ca_cert['private_key'], - private_key_passphrase=expected_ca_password, - name=expected_ca_name, - context=None - ) - - @mock.patch('magnum.common.x509.operations.generate_client_certificate') - @mock.patch('magnum.common.short_id.generate_id') - def test_generate_client_cert(self, mock_generate_id, mock_generate_cert): - expected_name = cert_manager.CONDUCTOR_CLIENT_NAME - expected_ca_name = 'ca-name' - expected_password = 'password' - expected_ca_password = 'ca-password' - expected_cert = { - 'private_key': 'private_key', 'certificate': 'certificate'} - expected_ca_cert = { - 'private_key': 'ca_private_key', 'certificate': 'ca_certificate'} - expected_cert_ref = 'cert_ref' - - mock_generate_id.return_value = expected_password - mock_generate_cert.return_value = expected_cert - - self.CertManager.store_cert.return_value = expected_cert_ref - - self.assertEqual( - expected_cert_ref, - cert_manager._generate_client_cert( - expected_ca_name, - expected_ca_cert, - expected_ca_password)) - - mock_generate_cert.assert_called_once_with( - expected_ca_name, - expected_name, - expected_ca_cert['private_key'], - encryption_password=expected_password, - ca_key_password=expected_ca_password, - ) - self.CertManager.store_cert.assert_called_once_with( - certificate=expected_cert['certificate'], - private_key=expected_cert['private_key'], - private_key_passphrase=expected_password, - name=expected_name, - context=None - ) - - def _test_generate_certificates(self, - expected_ca_name, - mock_cluster, - mock_generate_ca_cert, - mock_generate_client_cert): - expected_ca_password = 'ca-password' - expected_ca_cert = { - 'private_key': 'ca_private_key', 'certificate': 'ca_certificate'} - expected_cert_ref = 'cert_ref' - expected_ca_cert_ref = 'ca-cert-ref' - - mock_generate_ca_cert.return_value = (expected_ca_cert_ref, - expected_ca_cert, - expected_ca_password) - mock_generate_client_cert.return_value = expected_cert_ref - 
- cert_manager.generate_certificates_to_cluster(mock_cluster) - self.assertEqual(expected_ca_cert_ref, mock_cluster.ca_cert_ref) - self.assertEqual(expected_cert_ref, mock_cluster.magnum_cert_ref) - - mock_generate_ca_cert.assert_called_once_with(expected_ca_name, - context=None) - mock_generate_client_cert.assert_called_once_with( - expected_ca_name, expected_ca_cert, expected_ca_password, - context=None) - - @mock.patch('magnum.conductor.handlers.common.cert_manager.' - '_generate_client_cert') - @mock.patch('magnum.conductor.handlers.common.cert_manager.' - '_generate_ca_cert') - def test_generate_certificates(self, mock_generate_ca_cert, - mock_generate_client_cert): - expected_ca_name = 'ca-name' - mock_cluster = mock.MagicMock() - mock_cluster.name = expected_ca_name - - self._test_generate_certificates(expected_ca_name, - mock_cluster, - mock_generate_ca_cert, - mock_generate_client_cert) - - @mock.patch('magnum.conductor.handlers.common.cert_manager.' - '_generate_client_cert') - @mock.patch('magnum.conductor.handlers.common.cert_manager.' - '_generate_ca_cert') - def test_generate_certificates_without_name(self, mock_generate_ca_cert, - mock_generate_client_cert): - expected_ca_name = 'ca-uuid' - mock_cluster = mock.MagicMock() - mock_cluster.name = None - mock_cluster.uuid = expected_ca_name - - self._test_generate_certificates(expected_ca_name, - mock_cluster, - mock_generate_ca_cert, - mock_generate_client_cert) - - @mock.patch('magnum.conductor.handlers.common.cert_manager.' - '_get_issuer_name') - def test_generate_certificates_with_error(self, mock_get_issuer_name): - mock_cluster = mock.MagicMock() - mock_get_issuer_name.side_effect = exception.MagnumException() - - self.assertRaises(exception.CertificatesToClusterFailed, - cert_manager.generate_certificates_to_cluster, - mock_cluster) - - @mock.patch('magnum.common.x509.operations.sign') - def test_sign_node_certificate(self, mock_x509_sign): - mock_cluster = mock.MagicMock() - mock_cluster.uuid = "mock_cluster_uuid" - mock_ca_cert = mock.MagicMock() - mock_ca_cert.get_private_key.return_value = mock.sentinel.priv_key - passphrase = mock.sentinel.passphrase - mock_ca_cert.get_private_key_passphrase.return_value = passphrase - self.CertManager.get_cert.return_value = mock_ca_cert - mock_csr = mock.MagicMock() - mock_x509_sign.return_value = mock.sentinel.signed_cert - - cluster_ca_cert = cert_manager.sign_node_certificate(mock_cluster, - mock_csr) - - self.CertManager.get_cert.assert_called_once_with( - mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid, - context=None) - mock_x509_sign.assert_called_once_with(mock_csr, mock_cluster.name, - mock.sentinel.priv_key, - passphrase) - self.assertEqual(mock.sentinel.signed_cert, cluster_ca_cert) - - @mock.patch('magnum.common.x509.operations.sign') - def test_sign_node_certificate_without_cluster_name(self, mock_x509_sign): - mock_cluster = mock.MagicMock() - mock_cluster.name = None - mock_cluster.uuid = "mock_cluster_uuid" - mock_ca_cert = mock.MagicMock() - mock_ca_cert.get_private_key.return_value = mock.sentinel.priv_key - passphrase = mock.sentinel.passphrase - mock_ca_cert.get_private_key_passphrase.return_value = passphrase - self.CertManager.get_cert.return_value = mock_ca_cert - mock_csr = mock.MagicMock() - mock_x509_sign.return_value = mock.sentinel.signed_cert - - cluster_ca_cert = cert_manager.sign_node_certificate(mock_cluster, - mock_csr) - - self.CertManager.get_cert.assert_called_once_with( - mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid, - 
context=None) - mock_x509_sign.assert_called_once_with(mock_csr, mock_cluster.uuid, - mock.sentinel.priv_key, - passphrase) - self.assertEqual(mock.sentinel.signed_cert, cluster_ca_cert) - - def test_get_cluster_ca_certificate(self): - mock_cluster = mock.MagicMock() - mock_cluster.uuid = "mock_cluster_uuid" - mock_ca_cert = mock.MagicMock() - self.CertManager.get_cert.return_value = mock_ca_cert - - cluster_ca_cert = cert_manager.get_cluster_ca_certificate(mock_cluster) - - self.CertManager.get_cert.assert_called_once_with( - mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid, - context=None) - self.assertEqual(mock_ca_cert, cluster_ca_cert) - - def test_delete_certtificate(self): - mock_delete_cert = self.CertManager.delete_cert - expected_cert_ref = 'cert_ref' - expected_ca_cert_ref = 'ca_cert_ref' - mock_cluster = mock.MagicMock() - mock_cluster.uuid = "mock_cluster_uuid" - mock_cluster.ca_cert_ref = expected_ca_cert_ref - mock_cluster.magnum_cert_ref = expected_cert_ref - - cert_manager.delete_certificates_from_cluster(mock_cluster) - mock_delete_cert.assert_any_call(expected_ca_cert_ref, - resource_ref=mock_cluster.uuid, - context=None) - mock_delete_cert.assert_any_call(expected_cert_ref, - resource_ref=mock_cluster.uuid, - context=None) - - def test_delete_certtificate_if_raise_error(self): - mock_delete_cert = self.CertManager.delete_cert - expected_cert_ref = 'cert_ref' - expected_ca_cert_ref = 'ca_cert_ref' - mock_cluster = mock.MagicMock() - mock_cluster.ca_cert_ref = expected_ca_cert_ref - mock_cluster.magnum_cert_ref = expected_cert_ref - - mock_delete_cert.side_effect = ValueError - - cert_manager.delete_certificates_from_cluster(mock_cluster) - mock_delete_cert.assert_any_call(expected_ca_cert_ref, - resource_ref=mock_cluster.uuid, - context=None) - mock_delete_cert.assert_any_call(expected_cert_ref, - resource_ref=mock_cluster.uuid, - context=None) - - def test_delete_certtificate_without_cert_ref(self): - mock_delete_cert = self.CertManager.delete_cert - mock_cluster = mock.MagicMock() - mock_cluster.ca_cert_ref = None - mock_cluster.magnum_cert_ref = None - - cert_manager.delete_certificates_from_cluster(mock_cluster) - self.assertFalse(mock_delete_cert.called) diff --git a/magnum/tests/unit/conductor/handlers/common/test_trust_manager.py b/magnum/tests/unit/conductor/handlers/common/test_trust_manager.py deleted file mode 100644 index 3860bb8b..00000000 --- a/magnum/tests/unit/conductor/handlers/common/test_trust_manager.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2016 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
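The test_delete_certtificate_if_raise_error case above implies that certificate cleanup is best-effort: a failure deleting one stored ref must not stop deletion of the other, and absent refs are skipped. A sketch of that behaviour (the dict-based cluster and injected cert_manager are illustrative assumptions, not magnum's signatures):

import logging

LOG = logging.getLogger(__name__)

def delete_certificates(cert_manager, cluster):
    # Try to delete both stored refs; skip empty ones and log, rather
    # than propagate, per-ref failures.
    for ref in (cluster.get('ca_cert_ref'), cluster.get('magnum_cert_ref')):
        if not ref:
            continue  # nothing was stored for this slot
        try:
            cert_manager.delete_cert(ref, resource_ref=cluster.get('uuid'))
        except Exception:
            LOG.warning('Deleting cert ref %s failed; continuing', ref)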
- -import mock -from mock import patch - -from magnum.common import exception -from magnum.conductor.handlers.common import trust_manager -from magnum.tests import base - - -class TrustManagerTestCase(base.BaseTestCase): - def setUp(self): - super(TrustManagerTestCase, self).setUp() - - osc_class_patcher = patch('magnum.common.clients.OpenStackClients') - osc_class = osc_class_patcher.start() - self.addCleanup(osc_class_patcher.stop) - - self.osc = mock.MagicMock() - osc_class.return_value = self.osc - - @patch('magnum.common.utils.generate_password') - def test_create_trustee_and_trust(self, mock_generate_password): - mock_password = "password_mock" - mock_generate_password.return_value = mock_password - mock_cluster = mock.MagicMock() - mock_cluster.uuid = 'mock_cluster_uuid' - mock_cluster.project_id = 'mock_cluster_project_id' - mock_keystone = mock.MagicMock() - mock_trustee = mock.MagicMock() - mock_trustee.id = 'mock_trustee_id' - mock_trustee.name = 'mock_trustee_username' - mock_trust = mock.MagicMock() - mock_trust.id = 'mock_trust_id' - - self.osc.keystone.return_value = mock_keystone - - mock_keystone.create_trustee.return_value = mock_trustee - mock_keystone.create_trust.return_value = mock_trust - - trust_manager.create_trustee_and_trust(self.osc, mock_cluster) - - mock_keystone.create_trustee.assert_called_once_with( - '%s_%s' % (mock_cluster.uuid, mock_cluster.project_id), - mock_password, - ) - mock_keystone.create_trust.assert_called_once_with( - mock_trustee.id, - ) - self.assertEqual(mock_trustee.name, mock_cluster.trustee_username) - self.assertEqual(mock_trustee.id, mock_cluster.trustee_user_id) - self.assertEqual(mock_password, mock_cluster.trustee_password) - self.assertEqual(mock_trust.id, mock_cluster.trust_id) - - @patch('magnum.common.utils.generate_password') - def test_create_trustee_and_trust_with_error(self, mock_generate_password): - mock_cluster = mock.MagicMock() - mock_generate_password.side_effect = exception.MagnumException() - - self.assertRaises(exception.TrusteeOrTrustToClusterFailed, - trust_manager.create_trustee_and_trust, - self.osc, - mock_cluster) - - def test_delete_trustee_and_trust(self): - mock_cluster = mock.MagicMock() - mock_cluster.trust_id = 'trust_id' - mock_cluster.trustee_user_id = 'trustee_user_id' - mock_keystone = mock.MagicMock() - self.osc.keystone.return_value = mock_keystone - context = mock.MagicMock() - - trust_manager.delete_trustee_and_trust(self.osc, context, - mock_cluster) - - mock_keystone.delete_trust.assert_called_once_with( - context, mock_cluster - ) - mock_keystone.delete_trustee.assert_called_once_with( - mock_cluster.trustee_user_id, - ) - - def test_delete_trustee_and_trust_without_trust_id(self): - mock_cluster = mock.MagicMock() - mock_cluster.trust_id = None - mock_cluster.trustee_user_id = 'trustee_user_id' - mock_keystone = mock.MagicMock() - self.osc.keystone.return_value = mock_keystone - context = mock.MagicMock() - - trust_manager.delete_trustee_and_trust(self.osc, context, - mock_cluster) - - self.assertEqual(0, mock_keystone.delete_trust.call_count) - mock_keystone.delete_trustee.assert_called_once_with( - mock_cluster.trustee_user_id, - ) - - def test_delete_trustee_and_trust_without_trustee_user_id(self): - mock_cluster = mock.MagicMock() - mock_cluster.trust_id = 'trust_id' - mock_cluster.trustee_user_id = None - mock_keystone = mock.MagicMock() - self.osc.keystone.return_value = mock_keystone - context = mock.MagicMock() - - trust_manager.delete_trustee_and_trust(self.osc, context, 
mock_cluster) - - mock_keystone.delete_trust.assert_called_once_with( - context, mock_cluster - ) - self.assertEqual(0, mock_keystone.delete_trustee.call_count) diff --git a/magnum/tests/unit/conductor/handlers/test_ca_conductor.py b/magnum/tests/unit/conductor/handlers/test_ca_conductor.py deleted file mode 100644 index 72ae38d5..00000000 --- a/magnum/tests/unit/conductor/handlers/test_ca_conductor.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from magnum.conductor.handlers import ca_conductor -from magnum.tests import base - - -class TestSignConductor(base.TestCase): - def setUp(self): - super(TestSignConductor, self).setUp() - self.ca_handler = ca_conductor.Handler() - - @mock.patch.object(ca_conductor, 'cert_manager') - def test_sign_certificate(self, mock_cert_manager): - mock_cluster = mock.MagicMock() - mock_certificate = mock.MagicMock() - mock_certificate.csr = 'fake-csr' - mock_cert_manager.sign_node_certificate.return_value = 'fake-pem' - - actual_cert = self.ca_handler.sign_certificate(self.context, - mock_cluster, - mock_certificate) - - mock_cert_manager.sign_node_certificate.assert_called_once_with( - mock_cluster, 'fake-csr', context=self.context - ) - self.assertEqual('fake-pem', actual_cert.pem) - - @mock.patch.object(ca_conductor, 'cert_manager') - def test_get_ca_certificate(self, mock_cert_manager): - mock_cluster = mock.MagicMock() - mock_cluster.uuid = 'cluster-uuid' - mock_cluster.user_id = 'user-id' - mock_cluster.project_id = 'project-id' - mock_cert = mock.MagicMock() - mock_cert.get_certificate.return_value = 'fake-pem' - mock_cert_manager.get_cluster_ca_certificate.return_value = mock_cert - - actual_cert = self.ca_handler.get_ca_certificate(self.context, - mock_cluster) - - self.assertEqual(mock_cluster.uuid, actual_cert.cluster_uuid) - self.assertEqual(mock_cluster.user_id, actual_cert.user_id) - self.assertEqual(mock_cluster.project_id, actual_cert.project_id) - self.assertEqual('fake-pem', actual_cert.pem) diff --git a/magnum/tests/unit/conductor/handlers/test_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_cluster_conductor.py deleted file mode 100644 index 251af785..00000000 --- a/magnum/tests/unit/conductor/handlers/test_cluster_conductor.py +++ /dev/null @@ -1,516 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
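TrustManagerTestCase above wires its OpenStackClients stub in setUp by starting a patcher manually and registering its stop() with addCleanup, so every test method sees the stub and the patch is always undone, even when a test fails. A runnable sketch of that fixture pattern, assuming a local OpenStackClients stand-in rather than the real magnum.common.clients class:

import unittest
from unittest import mock


class OpenStackClients(object):
    """Local stand-in for the patched dependency (assumption, not Magnum)."""
    def keystone(self):
        raise RuntimeError('real client should never be reached in tests')


class PatchedFixtureTest(unittest.TestCase):
    def setUp(self):
        super(PatchedFixtureTest, self).setUp()
        # Start the patcher by hand; addCleanup guarantees stop() runs
        # after every test, even when the test body raises.
        patcher = mock.patch(__name__ + '.OpenStackClients')
        osc_class = patcher.start()
        self.addCleanup(patcher.stop)
        self.osc = mock.MagicMock()
        osc_class.return_value = self.osc

    def test_constructor_returns_stub(self):
        self.assertIs(self.osc, OpenStackClients())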
- -import six - -from heatclient import exc -import mock -from mock import patch -from oslo_service import loopingcall -from pycadf import cadftaxonomy as taxonomy - -from magnum.common import exception -from magnum.conductor.handlers import cluster_conductor -import magnum.conf -from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_atomic_dr -from magnum import objects -from magnum.objects.fields import ClusterStatus as cluster_status -from magnum.tests import fake_notifier -from magnum.tests.unit.db import base as db_base -from magnum.tests.unit.db import utils - -CONF = magnum.conf.CONF - - -class TestHandler(db_base.DbTestCase): - - def setUp(self): - super(TestHandler, self).setUp() - self.handler = cluster_conductor.Handler() - cluster_template_dict = utils.get_test_cluster_template() - self.cluster_template = objects.ClusterTemplate( - self.context, **cluster_template_dict) - self.cluster_template.create() - cluster_dict = utils.get_test_cluster(node_count=1) - self.cluster = objects.Cluster(self.context, **cluster_dict) - self.cluster.create() - - @patch('magnum.conductor.scale_manager.get_scale_manager') - @patch('magnum.drivers.common.driver.Driver.get_driver') - @patch('magnum.common.clients.OpenStackClients') - def test_update_node_count_success( - self, mock_openstack_client_class, - mock_driver, - mock_scale_manager): - - mock_heat_stack = mock.MagicMock() - mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE - mock_heat_client = mock.MagicMock() - mock_heat_client.stacks.get.return_value = mock_heat_stack - mock_openstack_client = mock_openstack_client_class.return_value - mock_openstack_client.heat.return_value = mock_heat_client - mock_dr = mock.MagicMock() - mock_driver.return_value = mock_dr - - self.cluster.node_count = 2 - self.cluster.status = cluster_status.CREATE_COMPLETE - self.handler.cluster_update(self.context, self.cluster) - - notifications = fake_notifier.NOTIFICATIONS - self.assertEqual(1, len(notifications)) - self.assertEqual( - 'magnum.cluster.update', notifications[0].event_type) - self.assertEqual( - taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) - - mock_dr.update_cluster.assert_called_once_with( - self.context, self.cluster, mock_scale_manager.return_value, - False) - cluster = objects.Cluster.get(self.context, self.cluster.uuid) - self.assertEqual(2, cluster.node_count) - - @patch('magnum.common.clients.OpenStackClients') - def test_update_node_count_failure( - self, mock_openstack_client_class): - - mock_heat_stack = mock.MagicMock() - mock_heat_stack.stack_status = cluster_status.CREATE_FAILED - mock_heat_client = mock.MagicMock() - mock_heat_client.stacks.get.return_value = mock_heat_stack - mock_openstack_client = mock_openstack_client_class.return_value - mock_openstack_client.heat.return_value = mock_heat_client - - self.cluster.node_count = 2 - self.cluster.status = cluster_status.CREATE_FAILED - self.assertRaises(exception.NotSupported, self.handler.cluster_update, - self.context, self.cluster) - - notifications = fake_notifier.NOTIFICATIONS - self.assertEqual(1, len(notifications)) - self.assertEqual( - 'magnum.cluster.update', notifications[0].event_type) - self.assertEqual( - taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome']) - - cluster = objects.Cluster.get(self.context, self.cluster.uuid) - self.assertEqual(1, cluster.node_count) - - @patch('magnum.conductor.scale_manager.get_scale_manager') - @patch('magnum.drivers.common.driver.Driver.get_driver') - 
@patch('magnum.common.clients.OpenStackClients')
- def _test_update_cluster_status_complete(
- self, expect_status, mock_openstack_client_class,
- mock_driver, mock_scale_manager):
-
- mock_heat_stack = mock.MagicMock()
- mock_heat_stack.stack_status = expect_status
- mock_heat_client = mock.MagicMock()
- mock_heat_client.stacks.get.return_value = mock_heat_stack
- mock_openstack_client = mock_openstack_client_class.return_value
- mock_openstack_client.heat.return_value = mock_heat_client
- mock_dr = mock.MagicMock()
- mock_driver.return_value = mock_dr
-
- self.cluster.node_count = 2
- self.cluster.status = cluster_status.CREATE_COMPLETE
- self.handler.cluster_update(self.context, self.cluster)
-
- notifications = fake_notifier.NOTIFICATIONS
- self.assertEqual(1, len(notifications))
- self.assertEqual(
- 'magnum.cluster.update', notifications[0].event_type)
- self.assertEqual(
- taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
-
- mock_dr.update_cluster.assert_called_once_with(
- self.context, self.cluster, mock_scale_manager.return_value, False)
- cluster = objects.Cluster.get(self.context, self.cluster.uuid)
- self.assertEqual(2, cluster.node_count)
-
- def test_update_cluster_status_update_complete(self):
- self._test_update_cluster_status_complete(
- cluster_status.UPDATE_COMPLETE)
-
- def test_update_cluster_status_resume_complete(self):
- self._test_update_cluster_status_complete(
- cluster_status.RESUME_COMPLETE)
-
- def test_update_cluster_status_restore_complete(self):
- self._test_update_cluster_status_complete(
- cluster_status.RESTORE_COMPLETE)
-
- def test_update_cluster_status_rollback_complete(self):
- self._test_update_cluster_status_complete(
- cluster_status.ROLLBACK_COMPLETE)
-
- def test_update_cluster_status_snapshot_complete(self):
- self._test_update_cluster_status_complete(
- cluster_status.SNAPSHOT_COMPLETE)
-
- def test_update_cluster_status_check_complete(self):
- self._test_update_cluster_status_complete(
- cluster_status.CHECK_COMPLETE)
-
- def test_update_cluster_status_adopt_complete(self):
- self._test_update_cluster_status_complete(
- cluster_status.ADOPT_COMPLETE)
-
- @patch('magnum.drivers.heat.driver.HeatPoller')
- @patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
- @patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
- @patch('magnum.drivers.common.driver.Driver.get_driver')
- @patch('magnum.common.clients.OpenStackClients')
- def test_create(self, mock_openstack_client_class,
- mock_driver, mock_cm, mock_trust_manager,
- mock_heat_poller_class):
- timeout = 15
- mock_poller = mock.MagicMock()
- mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone()
- mock_heat_poller_class.return_value = mock_poller
- osc = mock.sentinel.osc
-
- def return_keystone():
- return self.keystone_client
-
- osc.keystone = return_keystone
- mock_openstack_client_class.return_value = osc
- mock_dr = mock.MagicMock()
- mock_driver.return_value = mock_dr
-
- def create_stack_side_effect(context, osc, cluster, timeout):
- return {'stack': {'id': 'stack-id'}}
-
- mock_dr.create_stack.side_effect = create_stack_side_effect
-
- # FixMe(eliqiao): cluster_create will call cluster.create()
- # again, which is bad because we have already called it in setUp
- # and other test cases share the code in setUp().
- # But in self.handler.cluster_create, we update cluster.uuid and
- # cluster.stack_id, so cluster.create will create a new record with
- # clustermodel_id None, which is bad because we load the ClusterModel
- # object in the cluster object by clustermodel_id. Here we update
- # self.cluster.clustermodel_id so cluster.obj_get_changes will
- # notice that clustermodel_id is updated and will update it
- # in the db.
- self.cluster.cluster_template_id = self.cluster_template.uuid
- cluster = self.handler.cluster_create(self.context,
- self.cluster, timeout)
-
- notifications = fake_notifier.NOTIFICATIONS
- self.assertEqual(1, len(notifications))
- self.assertEqual(
- 'magnum.cluster.create', notifications[0].event_type)
- self.assertEqual(
- taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
-
- mock_dr.create_cluster.assert_called_once_with(self.context,
- self.cluster, timeout)
- mock_cm.generate_certificates_to_cluster.assert_called_once_with(
- self.cluster, context=self.context)
- self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status)
- mock_trust_manager.create_trustee_and_trust.assert_called_once_with(
- osc, self.cluster)
-
- def _test_create_failed(self,
- mock_openstack_client_class,
- mock_cert_manager,
- mock_trust_manager,
- mock_cluster_create,
- expected_exception,
- is_create_cert_called=True,
- is_create_trust_called=True):
- osc = mock.MagicMock()
- mock_openstack_client_class.return_value = osc
- timeout = 15
-
- self.assertRaises(
- expected_exception,
- self.handler.cluster_create,
- self.context,
- self.cluster, timeout
- )
-
- gctb = mock_cert_manager.generate_certificates_to_cluster
- if is_create_cert_called:
- gctb.assert_called_once_with(self.cluster, context=self.context)
- else:
- gctb.assert_not_called()
- ctat = mock_trust_manager.create_trustee_and_trust
- if is_create_trust_called:
- ctat.assert_called_once_with(osc, self.cluster)
- else:
- ctat.assert_not_called()
- mock_cluster_create.assert_called_once_with()
-
- @patch('magnum.objects.Cluster.create')
- @patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
- @patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
- @patch('magnum.drivers.common.driver.Driver.get_driver')
- @patch('magnum.common.clients.OpenStackClients')
- def test_create_handles_bad_request(self, mock_openstack_client_class,
- mock_driver,
- mock_cert_manager,
- mock_trust_manager,
- mock_cluster_create):
- mock_dr = mock.MagicMock()
- mock_driver.return_value = mock_dr
- mock_dr.create_cluster.side_effect = exc.HTTPBadRequest
-
- self._test_create_failed(
- mock_openstack_client_class,
- mock_cert_manager,
- mock_trust_manager,
- mock_cluster_create,
- exception.InvalidParameterValue
- )
-
- notifications = fake_notifier.NOTIFICATIONS
- self.assertEqual(2, len(notifications))
- self.assertEqual(
- 'magnum.cluster.create', notifications[0].event_type)
- self.assertEqual(
- taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
- self.assertEqual(
- 'magnum.cluster.create', notifications[1].event_type)
- self.assertEqual(
- taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome'])
-
- @patch('magnum.objects.Cluster.create')
- @patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
- @patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
- @patch('magnum.common.clients.OpenStackClients')
- def test_create_with_cert_failed(self, mock_openstack_client_class,
- mock_cert_manager,
- mock_trust_manager,
- mock_cluster_create):
- e = exception.CertificatesToClusterFailed(cluster_uuid='uuid')
- mock_cert_manager.generate_certificates_to_cluster.side_effect = e
-
- self._test_create_failed(
- mock_openstack_client_class,
- mock_cert_manager,
- mock_trust_manager,
-
mock_cluster_create, - exception.CertificatesToClusterFailed - ) - - notifications = fake_notifier.NOTIFICATIONS - self.assertEqual(1, len(notifications)) - self.assertEqual( - 'magnum.cluster.create', notifications[0].event_type) - self.assertEqual( - taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome']) - - @patch('magnum.objects.Cluster.create') - @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') - @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') - @patch('magnum.common.clients.OpenStackClients') - def test_create_with_trust_failed(self, mock_openstack_client_class, - mock_cert_manager, - mock_trust_manager, - mock_cluster_create): - e = exception.TrusteeOrTrustToClusterFailed(cluster_uuid='uuid') - mock_trust_manager.create_trustee_and_trust.side_effect = e - - self._test_create_failed( - mock_openstack_client_class, - mock_cert_manager, - mock_trust_manager, - mock_cluster_create, - exception.TrusteeOrTrustToClusterFailed, - False - ) - - notifications = fake_notifier.NOTIFICATIONS - self.assertEqual(1, len(notifications)) - self.assertEqual( - 'magnum.cluster.create', notifications[0].event_type) - self.assertEqual( - taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome']) - - @patch('magnum.objects.Cluster.create') - @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') - @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') - @patch('magnum.drivers.common.driver.Driver.get_driver') - @patch('magnum.common.clients.OpenStackClients') - def test_create_with_invalid_unicode_name(self, - mock_openstack_client_class, - mock_driver, - mock_cert_manager, - mock_trust_manager, - mock_cluster_create): - error_message = six.u("""Invalid stack name 测试集群-zoyh253geukk - must contain only alphanumeric or "_-." - characters, must start with alpha""") - mock_dr = mock.MagicMock() - mock_driver.return_value = mock_dr - mock_dr.create_cluster.side_effect = exc.HTTPBadRequest(error_message) - - self._test_create_failed( - mock_openstack_client_class, - mock_cert_manager, - mock_trust_manager, - mock_cluster_create, - exception.InvalidParameterValue - ) - - notifications = fake_notifier.NOTIFICATIONS - self.assertEqual(2, len(notifications)) - self.assertEqual( - 'magnum.cluster.create', notifications[0].event_type) - self.assertEqual( - taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) - self.assertEqual( - 'magnum.cluster.create', notifications[1].event_type) - self.assertEqual( - taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) - - @patch('magnum.drivers.heat.driver.HeatPoller') - @patch('heatclient.common.template_utils' - '.process_multiple_environments_and_files') - @patch('heatclient.common.template_utils.get_template_contents') - @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') - @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') - @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' 
- '_extract_template_definition') - @patch('magnum.drivers.common.driver.Driver.get_driver') - @patch('magnum.common.clients.OpenStackClients') - @patch('magnum.common.short_id.generate_id') - def test_create_with_environment(self, - mock_short_id, - mock_openstack_client_class, - mock_driver, - mock_extract_tmpl_def, - mock_cert_manager, - mock_trust_manager, - mock_get_template_contents, - mock_process_mult, - mock_heat_poller_class): - timeout = 15 - self.cluster.cluster_template_id = self.cluster_template.uuid - self.cluster.name = 'cluster1' - cluster_name = self.cluster.name - mock_poller = mock.MagicMock() - mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone() - mock_heat_poller_class.return_value = mock_poller - mock_driver.return_value = k8s_atomic_dr.Driver() - mock_short_id.return_value = 'short_id' - - mock_extract_tmpl_def.return_value = ( - 'the/template/path.yaml', - {'heat_param_1': 'foo', 'heat_param_2': 'bar'}, - ['env_file_1', 'env_file_2']) - - mock_get_template_contents.return_value = ( - {'tmpl_file_1': 'some content', - 'tmpl_file_2': 'some more content'}, - 'some template yaml') - - def do_mock_process_mult(env_paths=None, env_list_tracker=None): - self.assertEqual(env_list_tracker, []) - for f in env_paths: - env_list_tracker.append('file:///' + f) - env_map = {path: 'content of ' + path for path in env_list_tracker} - return (env_map, None) - - mock_process_mult.side_effect = do_mock_process_mult - - mock_hc = mock.Mock() - mock_hc.stacks.create.return_value = {'stack': {'id': 'stack-id'}} - - osc = mock.Mock() - osc.heat.return_value = mock_hc - mock_openstack_client_class.return_value = osc - - self.handler.cluster_create(self.context, self.cluster, timeout) - - mock_extract_tmpl_def.assert_called_once_with(self.context, - self.cluster) - mock_get_template_contents.assert_called_once_with( - 'the/template/path.yaml') - mock_process_mult.assert_called_once_with( - env_paths=['the/template/env_file_1', 'the/template/env_file_2'], - env_list_tracker=mock.ANY) - mock_hc.stacks.create.assert_called_once_with( - environment_files=['file:///the/template/env_file_1', - 'file:///the/template/env_file_2'], - files={ - 'tmpl_file_1': 'some content', - 'tmpl_file_2': 'some more content', - 'file:///the/template/env_file_1': - 'content of file:///the/template/env_file_1', - 'file:///the/template/env_file_2': - 'content of file:///the/template/env_file_2' - }, - parameters={'heat_param_1': 'foo', 'heat_param_2': 'bar'}, - stack_name=('%s-short_id' % cluster_name), - template='some template yaml', - timeout_mins=timeout) - - @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') - @patch('magnum.common.clients.OpenStackClients') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_cluster_delete(self, mock_driver, mock_openstack_client_class, - cert_manager): - mock_driver.return_value = k8s_atomic_dr.Driver() - osc = mock.MagicMock() - mock_openstack_client_class.return_value = osc - osc.heat.side_effect = exc.HTTPNotFound - self.handler.cluster_delete(self.context, self.cluster.uuid) - - notifications = fake_notifier.NOTIFICATIONS - self.assertEqual(2, len(notifications)) - self.assertEqual( - 'magnum.cluster.delete', notifications[0].event_type) - self.assertEqual( - taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) - self.assertEqual( - 'magnum.cluster.delete', notifications[1].event_type) - self.assertEqual( - taxonomy.OUTCOME_SUCCESS, notifications[1].payload['outcome']) - self.assertEqual( - 1, 
cert_manager.delete_certificates_from_cluster.call_count) - # The cluster has been destroyed - self.assertRaises(exception.ClusterNotFound, - objects.Cluster.get, self.context, self.cluster.uuid) - - @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') - @patch('magnum.common.clients.OpenStackClients') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_cluster_delete_conflict(self, mock_driver, - mock_openstack_client_class, - cert_manager): - mock_driver.return_value = k8s_atomic_dr.Driver() - osc = mock.MagicMock() - mock_openstack_client_class.return_value = osc - osc.heat.side_effect = exc.HTTPConflict - self.assertRaises(exception.OperationInProgress, - self.handler.cluster_delete, - self.context, - self.cluster.uuid) - - notifications = fake_notifier.NOTIFICATIONS - self.assertEqual(2, len(notifications)) - self.assertEqual( - 'magnum.cluster.delete', notifications[0].event_type) - self.assertEqual( - taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) - self.assertEqual( - 'magnum.cluster.delete', notifications[1].event_type) - self.assertEqual( - taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) - self.assertEqual( - 0, cert_manager.delete_certificates_from_cluster.call_count) diff --git a/magnum/tests/unit/conductor/handlers/test_conductor_listener.py b/magnum/tests/unit/conductor/handlers/test_conductor_listener.py deleted file mode 100644 index 4ca16334..00000000 --- a/magnum/tests/unit/conductor/handlers/test_conductor_listener.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from magnum.conductor.handlers import conductor_listener -from magnum.tests import base - - -class TestHandler(base.BaseTestCase): - - def setUp(self): - super(TestHandler, self).setUp() - self.handler = conductor_listener.Handler() - - def test_ping_conductor(self): - self.assertTrue(self.handler.ping_conductor({})) diff --git a/magnum/tests/unit/conductor/handlers/test_indirection_api.py b/magnum/tests/unit/conductor/handlers/test_indirection_api.py deleted file mode 100644 index 2c0e124e..00000000 --- a/magnum/tests/unit/conductor/handlers/test_indirection_api.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
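The delete tests above drive their error paths by assigning an exception class to side_effect, which makes the stubbed Heat call raise instead of return; the handler is then expected to translate that into a Magnum-level exception. A compact, self-contained sketch of the same technique (HTTPConflict, OperationInProgress, and delete_stack below are stand-ins, not heatclient or Magnum code):

import unittest
from unittest import mock


class HTTPConflict(Exception):
    """Stand-in for heatclient.exc.HTTPConflict."""


class OperationInProgress(Exception):
    """Stand-in for the domain-specific error the handler raises."""


def delete_stack(heat, stack_id):
    # Sketch of the translation the conflict test above verifies.
    try:
        heat.stacks.delete(stack_id)
    except HTTPConflict:
        raise OperationInProgress(stack_id)


class SideEffectTest(unittest.TestCase):
    def test_conflict_is_translated(self):
        heat = mock.MagicMock()
        # side_effect makes the stubbed call raise when invoked.
        heat.stacks.delete.side_effect = HTTPConflict
        self.assertRaises(OperationInProgress,
                          delete_stack, heat, 'stack-id')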
- -import oslo_messaging as messaging -from oslo_versionedobjects import fields - -from magnum.conductor.handlers import indirection_api -from magnum.objects import base as obj_base -from magnum.tests import base - - -class TestIndirectionApiConductor(base.TestCase): - def setUp(self): - super(TestIndirectionApiConductor, self).setUp() - self.conductor = indirection_api.Handler() - - def _test_object_action(self, is_classmethod, raise_exception): - @obj_base.MagnumObjectRegistry.register - class TestObject(obj_base.MagnumObject): - def foo(self, context, raise_exception=False): - if raise_exception: - raise Exception('test') - else: - return 'test' - - @classmethod - def bar(cls, context, raise_exception=False): - if raise_exception: - raise Exception('test') - else: - return 'test' - - obj = TestObject() - if is_classmethod: - result = self.conductor.object_class_action( - self.context, TestObject.obj_name(), 'bar', '1.0', - tuple(), {'raise_exception': raise_exception}) - else: - updates, result = self.conductor.object_action( - self.context, obj, 'foo', tuple(), - {'raise_exception': raise_exception}) - self.assertEqual('test', result) - - def test_object_action(self): - self._test_object_action(False, False) - - def test_object_action_on_raise(self): - self.assertRaises(messaging.ExpectedException, - self._test_object_action, False, True) - - def test_object_class_action(self): - self._test_object_action(True, False) - - def test_object_class_action_on_raise(self): - self.assertRaises(messaging.ExpectedException, - self._test_object_action, True, True) - - def test_object_action_copies_object(self): - @obj_base.MagnumObjectRegistry.register - class TestObject(obj_base.MagnumObject): - fields = {'dict': fields.DictOfStringsField()} - - def touch_dict(self, context): - self.dict['foo'] = 'bar' - self.obj_reset_changes() - - obj = TestObject() - obj.dict = {} - obj.obj_reset_changes() - updates, result = self.conductor.object_action( - self.context, obj, 'touch_dict', tuple(), {}) - # NOTE(danms): If conductor did not properly copy the object, then - # the new and reference copies of the nested dict object will be - # the same, and thus 'dict' will not be reported as changed - self.assertIn('dict', updates) - self.assertEqual({'foo': 'bar'}, updates['dict']) diff --git a/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py deleted file mode 100644 index 598f8127..00000000 --- a/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py +++ /dev/null @@ -1,906 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
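The NOTE(danms) comment above is the heart of test_object_action_copies_object: the conductor must act on a copy of the object so that an in-place mutation of a nested field still surfaces as a change relative to the caller's original. A simplified, library-free illustration of that copy-then-diff idea (oslo.versionedobjects does its own change tracking; the plain attribute diff below is an assumption made for the sketch):

import copy


def object_action(obj, method, *args, **kwargs):
    # Snapshot the state before invoking the method, then report every
    # attribute whose value differs afterwards, mirroring the 'updates'
    # dict the indirection tests above assert on.
    before = copy.deepcopy(obj.__dict__)
    result = getattr(obj, method)(*args, **kwargs)
    updates = {k: v for k, v in obj.__dict__.items()
               if before.get(k) != v}
    return updates, result


class Thing(object):
    def __init__(self):
        self.data = {}

    def touch_dict(self):
        self.data['foo'] = 'bar'  # in-place mutation of a nested field


thing = Thing()
updates, _ = object_action(thing, 'touch_dict')
assert updates == {'data': {'foo': 'bar'}}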
- -import mock -from mock import patch - -import magnum.conf -from magnum.drivers.k8s_coreos_v1 import driver as k8s_coreos_dr -from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_dr -from magnum import objects -from magnum.tests import base - -CONF = magnum.conf.CONF - - -class TestClusterConductorWithK8s(base.TestCase): - def setUp(self): - super(TestClusterConductorWithK8s, self).setUp() - self.cluster_template_dict = { - 'image_id': 'image_id', - 'flavor_id': 'flavor_id', - 'master_flavor_id': 'master_flavor_id', - 'keypair_id': 'keypair_id', - 'dns_nameserver': 'dns_nameserver', - 'external_network_id': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'network_driver': 'network_driver', - 'volume_driver': 'volume_driver', - 'docker_volume_size': 20, - 'docker_storage_driver': 'devicemapper', - 'cluster_distro': 'fedora-atomic', - 'coe': 'kubernetes', - 'token': None, - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'labels': {'flannel_network_cidr': '10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'flannel_backend': 'vxlan', - 'system_pods_initial_delay': '15', - 'system_pods_timeout': '1', - 'admission_control_list': 'fake_list', - 'prometheus_monitoring': 'False', - 'grafana_admin_passwd': 'fake_pwd', - 'kube_dashboard_enabled': 'True', - 'docker_volume_type': 'lvmdriver-1', - 'etcd_volume_size': '0'}, - 'tls_disabled': False, - 'server_type': 'vm', - 'registry_enabled': False, - 'insecure_registry': '10.0.0.1:5000', - 'master_lb_enabled': False, - 'floating_ip_enabled': False, - } - self.cluster_dict = { - 'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', - 'cluster_template_id': 'xx-xx-xx-xx', - 'keypair': 'keypair_id', - 'name': 'cluster1', - 'stack_id': 'xx-xx-xx-xx', - 'api_address': '172.17.2.3', - 'node_addresses': ['172.17.2.4'], - 'node_count': 1, - 'master_count': 1, - 'discovery_url': 'https://discovery.etcd.io/test', - 'docker_volume_size': 20, - 'master_addresses': ['172.17.2.18'], - 'ca_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx', - 'magnum_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx', - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', - 'coe_version': 'fake-version', - } - self.context.user_name = 'fake_user' - self.context.tenant = 'fake_tenant' - osc_patcher = mock.patch('magnum.common.clients.OpenStackClients') - self.mock_osc_class = osc_patcher.start() - self.addCleanup(osc_patcher.stop) - self.mock_osc = mock.MagicMock() - self.mock_osc.url_for.return_value = 'http://192.168.10.10:5000/v3' - self.mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' - self.mock_osc.cinder_region_name.return_value = 'RegionOne' - self.mock_keystone = mock.MagicMock() - self.mock_keystone.trustee_domain_id = 'trustee_domain_id' - self.mock_osc.keystone.return_value = self.mock_keystone - self.mock_osc_class.return_value = self.mock_osc - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - self._test_extract_template_definition( - mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get) - - def _test_extract_template_definition( - self, - mock_driver, - 
mock_objects_cluster_template_get_by_uuid, - mock_get, - missing_attr=None): - if missing_attr in self.cluster_template_dict: - self.cluster_template_dict[missing_attr] = None - elif missing_attr in self.cluster_dict: - self.cluster_dict[missing_attr] = None - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"1","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - cluster = objects.Cluster(self.context, **self.cluster_dict) - mock_driver.return_value = k8s_dr.Driver() - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - mapping = { - 'dns_nameserver': 'dns_nameserver', - 'image_id': 'server_image', - 'flavor_id': 'minion_flavor', - 'docker_volume_size': 'docker_volume_size', - 'docker_storage_driver': 'docker_storage_driver', - 'network_driver': 'network_driver', - 'volume_driver': 'volume_driver', - 'master_flavor_id': 'master_flavor', - 'apiserver_port': '', - 'node_count': 'number_of_minions', - 'master_count': 'number_of_masters', - 'discovery_url': 'discovery_url', - 'labels': {'flannel_network_cidr': '10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'flannel_backend': 'vxlan', - 'system_pods_initial_delay': '15', - 'system_pods_timeout': '1', - 'admission_control_list': 'fake_list', - 'prometheus_monitoring': 'False', - 'grafana_admin_passwd': 'fake_pwd', - 'kube_dashboard_enabled': 'True', - 'docker_volume_type': 'lvmdriver-1', - 'etcd_volume_size': '0'}, - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'cluster_uuid': self.cluster_dict['uuid'], - 'magnum_url': self.mock_osc.magnum_url.return_value, - 'tls_disabled': False, - 'insecure_registry': '10.0.0.1:5000', - } - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'network_driver': 'network_driver', - 'volume_driver': 'volume_driver', - 'dns_nameserver': 'dns_nameserver', - 'server_image': 'image_id', - 'minion_flavor': 'flavor_id', - 'master_flavor': 'master_flavor_id', - 'number_of_minions': 1, - 'number_of_masters': 1, - 'docker_volume_size': 20, - 'docker_volume_type': 'lvmdriver-1', - 'docker_storage_driver': 'devicemapper', - 'discovery_url': 'https://discovery.etcd.io/test', - 'etcd_volume_size': '0', - 'flannel_network_cidr': '10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'flannel_backend': 'vxlan', - 'system_pods_initial_delay': '15', - 'system_pods_timeout': '1', - 'admission_control_list': 'fake_list', - 'prometheus_monitoring': 'False', - 'grafana_admin_passwd': 'fake_pwd', - 'kube_dashboard_enabled': 'True', - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'tenant_name': 'fake_tenant', - 'username': 'fake_user', - 'cluster_uuid': self.cluster_dict['uuid'], - 'magnum_url': self.mock_osc.magnum_url.return_value, - 'region_name': self.mock_osc.cinder_region_name.return_value, - 'tls_disabled': False, - 'registry_enabled': False, - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': '', - 'auth_url': 
'http://192.168.10.10:5000/v3', - 'insecure_registry_url': '10.0.0.1:5000', - 'kube_version': 'fake-version', - } - if missing_attr is not None: - expected.pop(mapping[missing_attr], None) - - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - '../../common/templates/environments/no_etcd_volume.yaml', - '../../common/templates/environments/with_volume.yaml', - '../../common/templates/environments/no_master_lb.yaml', - '../../common/templates/environments/disable_floating_ip.yaml', - ], - env_files) - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_with_registry( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - self.cluster_template_dict['registry_enabled'] = True - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"1","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - cluster = objects.Cluster(self.context, **self.cluster_dict) - mock_driver.return_value = k8s_dr.Driver() - - CONF.set_override('swift_region', - 'RegionOne', - group='docker_registry') - - CONF.set_override('cluster_user_trust', - True, - group='trust') - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'auth_url': 'http://192.168.10.10:5000/v3', - 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', - 'discovery_url': 'https://discovery.etcd.io/test', - 'dns_nameserver': 'dns_nameserver', - 'docker_storage_driver': 'devicemapper', - 'docker_volume_size': 20, - 'docker_volume_type': 'lvmdriver-1', - 'etcd_volume_size': '0', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'flannel_backend': 'vxlan', - 'flannel_network_cidr': '10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'system_pods_initial_delay': '15', - 'system_pods_timeout': '1', - 'admission_control_list': 'fake_list', - 'prometheus_monitoring': 'False', - 'grafana_admin_passwd': 'fake_pwd', - 'kube_dashboard_enabled': 'True', - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'magnum_url': 'http://127.0.0.1:9511/v1', - 'master_flavor': 'master_flavor_id', - 'minion_flavor': 'flavor_id', - 'network_driver': 'network_driver', - 'no_proxy': 'no_proxy', - 'number_of_masters': 1, - 'number_of_minions': 1, - 'region_name': 'RegionOne', - 'registry_container': 'docker_registry', - 'registry_enabled': True, - 'server_image': 'image_id', - 'ssh_key_name': 'keypair_id', - 'swift_region': 'RegionOne', - 'tenant_name': 'fake_tenant', - 'tls_disabled': False, - 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trustee_username': 'fake_trustee', - 'username': 'fake_user', - 'volume_driver': 'volume_driver', - 'insecure_registry_url': '10.0.0.1:5000', - 'kube_version': 'fake-version', - } - - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - 
'../../common/templates/environments/no_etcd_volume.yaml',
- '../../common/templates/environments/with_volume.yaml',
- '../../common/templates/environments/no_master_lb.yaml',
- '../../common/templates/environments/disable_floating_ip.yaml',
- ],
- env_files)
-
- @patch('requests.get')
- @patch('magnum.objects.ClusterTemplate.get_by_uuid')
- @patch('magnum.drivers.common.driver.Driver.get_driver')
- def test_extract_template_definition_only_required(
- self,
- mock_driver,
- mock_objects_cluster_template_get_by_uuid,
- mock_get):
-
- not_required = ['image_id', 'flavor_id', 'dns_nameserver',
- 'docker_volume_size', 'fixed_network', 'http_proxy',
- 'https_proxy', 'no_proxy', 'network_driver',
- 'master_flavor_id', 'docker_storage_driver',
- 'volume_driver', 'fixed_subnet']
- for key in not_required:
- self.cluster_template_dict[key] = None
- self.cluster_dict['discovery_url'] = 'https://discovery.etcd.io/test'
-
- cluster_template = objects.ClusterTemplate(
- self.context, **self.cluster_template_dict)
- mock_objects_cluster_template_get_by_uuid.return_value = \
- cluster_template
- expected_result = str('{"action":"get","node":{"key":"test","value":'
- '"1","modifiedIndex":10,"createdIndex":10}}')
- mock_resp = mock.MagicMock()
- mock_resp.text = expected_result
- mock_get.return_value = mock_resp
- mock_driver.return_value = k8s_dr.Driver()
- cluster = objects.Cluster(self.context, **self.cluster_dict)
-
- (template_path,
- definition,
- env_files) = mock_driver()._extract_template_definition(self.context,
- cluster)
-
- expected = {
- 'auth_url': 'http://192.168.10.10:5000/v3',
- 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
- 'discovery_url': 'https://discovery.etcd.io/test',
- 'docker_volume_size': 20,
- 'external_network': 'external_network_id',
- 'flannel_backend': 'vxlan',
- 'flannel_network_cidr': '10.101.0.0/16',
- 'flannel_network_subnetlen': '26',
- 'system_pods_initial_delay': '15',
- 'system_pods_timeout': '1',
- 'admission_control_list': 'fake_list',
- 'prometheus_monitoring': 'False',
- 'grafana_admin_passwd': 'fake_pwd',
- 'kube_dashboard_enabled': 'True',
- 'docker_volume_type': 'lvmdriver-1',
- 'etcd_volume_size': '0',
- 'insecure_registry_url': '10.0.0.1:5000',
- 'kube_version': 'fake-version',
- 'magnum_url': 'http://127.0.0.1:9511/v1',
- 'number_of_masters': 1,
- 'number_of_minions': 1,
- 'region_name': 'RegionOne',
- 'registry_enabled': False,
- 'ssh_key_name': 'keypair_id',
- 'tenant_name': 'fake_tenant',
- 'tls_disabled': False,
- 'trust_id': '',
- 'trustee_domain_id': 'trustee_domain_id',
- 'trustee_password': 'fake_trustee_password',
- 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656',
- 'trustee_username': 'fake_trustee',
- 'username': 'fake_user'
- }
- self.assertEqual(expected, definition)
- self.assertEqual(
- ['../../common/templates/environments/with_private_network.yaml',
- '../../common/templates/environments/no_etcd_volume.yaml',
- '../../common/templates/environments/with_volume.yaml',
- '../../common/templates/environments/no_master_lb.yaml',
- '../../common/templates/environments/disable_floating_ip.yaml',
- ],
- env_files)
-
- @patch('requests.get')
- @patch('magnum.objects.ClusterTemplate.get_by_uuid')
- @patch('magnum.drivers.common.driver.Driver.get_driver')
- def test_extract_template_definition_coreos_with_discovery(
- self,
- mock_driver,
- mock_objects_cluster_template_get_by_uuid,
- mock_get):
- self.cluster_template_dict['cluster_distro'] = 'coreos'
- cluster_template = objects.ClusterTemplate(
- self.context,
**self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"1","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - cluster = objects.Cluster(self.context, **self.cluster_dict) - mock_driver.return_value = k8s_coreos_dr.Driver() - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'dns_nameserver': 'dns_nameserver', - 'server_image': 'image_id', - 'minion_flavor': 'flavor_id', - 'master_flavor': 'master_flavor_id', - 'number_of_minions': 1, - 'number_of_masters': 1, - 'network_driver': 'network_driver', - 'volume_driver': 'volume_driver', - 'discovery_url': 'https://discovery.etcd.io/test', - 'etcd_volume_size': '0', - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'flannel_network_cidr': '10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'flannel_backend': 'vxlan', - 'system_pods_initial_delay': '15', - 'system_pods_timeout': '1', - 'admission_control_list': 'fake_list', - 'prometheus_monitoring': 'False', - 'grafana_admin_passwd': 'fake_pwd', - 'kube_dashboard_enabled': 'True', - 'tls_disabled': False, - 'registry_enabled': False, - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': '', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'cluster_uuid': self.cluster_dict['uuid'], - 'magnum_url': self.mock_osc.magnum_url.return_value, - 'insecure_registry_url': '10.0.0.1:5000', - 'kube_version': 'fake-version', - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - '../../common/templates/environments/no_master_lb.yaml', - '../../common/templates/environments/disable_floating_ip.yaml'], - env_files) - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_coreos_no_discoveryurl( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - reqget): - self.cluster_template_dict['cluster_distro'] = 'coreos' - self.cluster_dict['discovery_url'] = None - mock_req = mock.MagicMock(text='http://tokentest/h1/h2/h3') - reqget.return_value = mock_req - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - cluster = objects.Cluster(self.context, **self.cluster_dict) - mock_driver.return_value = k8s_coreos_dr.Driver() - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'dns_nameserver': 'dns_nameserver', - 'server_image': 'image_id', - 'minion_flavor': 'flavor_id', - 'master_flavor': 'master_flavor_id', - 'number_of_minions': 1, - 'number_of_masters': 1, - 'network_driver': 'network_driver', - 
'volume_driver': 'volume_driver', - 'discovery_url': 'http://tokentest/h1/h2/h3', - 'etcd_volume_size': '0', - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'flannel_network_cidr': '10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'flannel_backend': 'vxlan', - 'system_pods_initial_delay': '15', - 'system_pods_timeout': '1', - 'admission_control_list': 'fake_list', - 'prometheus_monitoring': 'False', - 'grafana_admin_passwd': 'fake_pwd', - 'kube_dashboard_enabled': 'True', - 'tls_disabled': False, - 'registry_enabled': False, - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': '', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'cluster_uuid': self.cluster_dict['uuid'], - 'magnum_url': self.mock_osc.magnum_url.return_value, - 'insecure_registry_url': '10.0.0.1:5000', - 'kube_version': 'fake-version', - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - '../../common/templates/environments/no_master_lb.yaml', - '../../common/templates/environments/disable_floating_ip.yaml'], - env_files) - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_without_dns( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - mock_driver.return_value = k8s_dr.Driver() - self._test_extract_template_definition( - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get, - missing_attr='dns_nameserver') - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_without_server_image( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - mock_driver.return_value = k8s_dr.Driver() - self._test_extract_template_definition( - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get, - missing_attr='image_id') - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_without_minion_flavor( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - mock_driver.return_value = k8s_dr.Driver() - self._test_extract_template_definition( - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get, - missing_attr='flavor_id') - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_without_docker_storage_driver( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - mock_driver.return_value = k8s_dr.Driver() - self._test_extract_template_definition( - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get, - missing_attr='docker_storage_driver') - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_without_master_flavor( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - mock_driver.return_value = k8s_dr.Driver() - 
self._test_extract_template_definition( - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get, - missing_attr='master_flavor_id') - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_without_apiserver_port( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - mock_driver.return_value = k8s_dr.Driver() - self._test_extract_template_definition( - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get, - missing_attr='apiserver_port') - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_without_node_count( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - mock_driver.return_value = k8s_dr.Driver() - self._test_extract_template_definition( - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get, - missing_attr='node_count') - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_without_master_count( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - mock_driver.return_value = k8s_dr.Driver() - self._test_extract_template_definition( - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get, - missing_attr='master_count') - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_without_discovery_url( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - reqget): - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - cluster_dict = self.cluster_dict - cluster_dict['discovery_url'] = None - cluster = objects.Cluster(self.context, **cluster_dict) - mock_driver.return_value = k8s_dr.Driver() - - CONF.set_override('etcd_discovery_service_endpoint_format', - 'http://etcd/test?size=%(size)d', - group='cluster') - mock_req = mock.MagicMock(text='https://address/token') - reqget.return_value = mock_req - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'dns_nameserver': 'dns_nameserver', - 'server_image': 'image_id', - 'master_flavor': 'master_flavor_id', - 'minion_flavor': 'flavor_id', - 'number_of_minions': 1, - 'number_of_masters': 1, - 'network_driver': 'network_driver', - 'volume_driver': 'volume_driver', - 'docker_volume_size': 20, - 'docker_volume_type': 'lvmdriver-1', - 'docker_storage_driver': 'devicemapper', - 'discovery_url': 'https://address/token', - 'etcd_volume_size': '0', - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'flannel_network_cidr': '10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'flannel_backend': 'vxlan', - 'system_pods_initial_delay': '15', - 'system_pods_timeout': '1', - 'admission_control_list': 'fake_list', - 'prometheus_monitoring': 'False', - 'grafana_admin_passwd': 'fake_pwd', - 'kube_dashboard_enabled': 'True', - 
'tenant_name': 'fake_tenant', - 'username': 'fake_user', - 'cluster_uuid': self.cluster_dict['uuid'], - 'magnum_url': self.mock_osc.magnum_url.return_value, - 'region_name': self.mock_osc.cinder_region_name.return_value, - 'tls_disabled': False, - 'registry_enabled': False, - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': '', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'insecure_registry_url': '10.0.0.1:5000', - 'kube_version': 'fake-version', - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - '../../common/templates/environments/no_etcd_volume.yaml', - '../../common/templates/environments/with_volume.yaml', - '../../common/templates/environments/no_master_lb.yaml', - '../../common/templates/environments/disable_floating_ip.yaml', - ], - env_files) - reqget.assert_called_once_with('http://etcd/test?size=1') - - @patch('magnum.common.short_id.generate_id') - @patch('heatclient.common.template_utils.get_template_contents') - @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' - '_extract_template_definition') - @patch('magnum.common.clients.OpenStackClients') - def test_create_stack(self, - mock_osc, - mock_extract_template_definition, - mock_get_template_contents, - mock_generate_id): - - mock_generate_id.return_value = 'xx-xx-xx-xx' - expected_stack_name = 'expected_stack_name-xx-xx-xx-xx' - expected_template_contents = 'template_contents' - dummy_cluster_name = 'expected_stack_name' - expected_timeout = 15 - - mock_tpl_files = {} - mock_get_template_contents.return_value = [ - mock_tpl_files, expected_template_contents] - mock_extract_template_definition.return_value = ('template/path', - {}, []) - mock_heat_client = mock.MagicMock() - mock_osc.return_value.heat.return_value = mock_heat_client - mock_cluster = mock.MagicMock() - mock_cluster.name = dummy_cluster_name - - k8s_dr.Driver().create_cluster(self.context, mock_cluster, - expected_timeout) - - expected_args = { - 'stack_name': expected_stack_name, - 'parameters': {}, - 'template': expected_template_contents, - 'files': {}, - 'environment_files': [], - 'timeout_mins': expected_timeout - } - mock_heat_client.stacks.create.assert_called_once_with(**expected_args) - - @patch('magnum.common.short_id.generate_id') - @patch('heatclient.common.template_utils.get_template_contents') - @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' 
- '_extract_template_definition') - @patch('magnum.common.clients.OpenStackClients') - def test_create_stack_no_timeout_specified( - self, - mock_osc, - mock_extract_template_definition, - mock_get_template_contents, - mock_generate_id): - - mock_generate_id.return_value = 'xx-xx-xx-xx' - expected_stack_name = 'expected_stack_name-xx-xx-xx-xx' - expected_template_contents = 'template_contents' - dummy_cluster_name = 'expected_stack_name' - expected_timeout = CONF.cluster_heat.create_timeout - - mock_tpl_files = {} - mock_get_template_contents.return_value = [ - mock_tpl_files, expected_template_contents] - mock_extract_template_definition.return_value = ('template/path', - {}, []) - mock_heat_client = mock.MagicMock() - mock_osc.return_value.heat.return_value = mock_heat_client - mock_cluster = mock.MagicMock() - mock_cluster.name = dummy_cluster_name - - k8s_dr.Driver().create_cluster(self.context, mock_cluster, None) - - expected_args = { - 'stack_name': expected_stack_name, - 'parameters': {}, - 'template': expected_template_contents, - 'files': {}, - 'environment_files': [], - 'timeout_mins': expected_timeout - } - mock_heat_client.stacks.create.assert_called_once_with(**expected_args) - - @patch('magnum.common.short_id.generate_id') - @patch('heatclient.common.template_utils.get_template_contents') - @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' - '_extract_template_definition') - @patch('magnum.common.clients.OpenStackClients') - def test_create_stack_timeout_is_zero( - self, - mock_osc, - mock_extract_template_definition, - mock_get_template_contents, - mock_generate_id): - - mock_generate_id.return_value = 'xx-xx-xx-xx' - expected_stack_name = 'expected_stack_name-xx-xx-xx-xx' - expected_template_contents = 'template_contents' - dummy_cluster_name = 'expected_stack_name' - cluster_timeout = 0 - expected_timeout = CONF.cluster_heat.create_timeout - - mock_tpl_files = {} - mock_get_template_contents.return_value = [ - mock_tpl_files, expected_template_contents] - mock_extract_template_definition.return_value = ('template/path', - {}, []) - mock_heat_client = mock.MagicMock() - mock_osc.return_value.heat.return_value = mock_heat_client - mock_cluster = mock.MagicMock() - mock_cluster.name = dummy_cluster_name - - k8s_dr.Driver().create_cluster(self.context, mock_cluster, - cluster_timeout) - - expected_args = { - 'stack_name': expected_stack_name, - 'parameters': {}, - 'template': expected_template_contents, - 'files': {}, - 'environment_files': [], - 'timeout_mins': expected_timeout - } - mock_heat_client.stacks.create.assert_called_once_with(**expected_args) - - @patch('heatclient.common.template_utils.get_template_contents') - @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' 
- '_extract_template_definition') - @patch('magnum.common.clients.OpenStackClients') - def test_update_stack(self, - mock_osc, - mock_extract_template_definition, - mock_get_template_contents): - - mock_stack_id = 'xx-xx-xx-xx' - expected_template_contents = 'template_contents' - - mock_tpl_files = {} - mock_get_template_contents.return_value = [ - mock_tpl_files, expected_template_contents] - mock_extract_template_definition.return_value = ('template/path', - {}, []) - mock_heat_client = mock.MagicMock() - mock_osc.return_value.heat.return_value = mock_heat_client - mock_cluster = mock.MagicMock() - mock_cluster.stack_id = mock_stack_id - - k8s_dr.Driver().update_cluster({}, mock_cluster) - - expected_args = { - 'parameters': {}, - 'template': expected_template_contents, - 'files': {}, - 'environment_files': [], - 'disable_rollback': True - } - mock_heat_client.stacks.update.assert_called_once_with(mock_stack_id, - **expected_args) diff --git a/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py deleted file mode 100644 index 2939acfd..00000000 --- a/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py +++ /dev/null @@ -1,347 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
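An aside on the pattern the conductor tests above and below all share: OpenStackClients is patched out in setUp, a ClusterTemplate and Cluster are built from plain dicts, and the driver's _extract_template_definition() is expected to hand back a (template_path, definition, env_files) triple that the assertions compare against literal parameter dicts and environment-file lists. A minimal sketch of that return contract only, with hypothetical paths and parameter names rather than the real driver logic:

    # Sketch only: shows the triple the tests unpack, not the real driver.
    def _extract_template_definition(context, cluster):
        definition = {
            'cluster_name': cluster.name,            # becomes a Heat parameter
            'number_of_masters': cluster.master_count,
        }
        env_files = ['../../common/templates/environments/no_master_lb.yaml']
        return ('template/path', definition, env_files)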
- -import mock -from mock import patch - -from magnum.drivers.heat import driver as heat_driver -from magnum.drivers.mesos_ubuntu_v1 import driver as mesos_dr -from magnum import objects -from magnum.objects.fields import ClusterStatus as cluster_status -from magnum.tests import base - - -class TestClusterConductorWithMesos(base.TestCase): - def setUp(self): - super(TestClusterConductorWithMesos, self).setUp() - self.cluster_template_dict = { - 'image_id': 'image_id', - 'flavor_id': 'flavor_id', - 'master_flavor_id': 'master_flavor_id', - 'keypair_id': 'keypair_id', - 'dns_nameserver': 'dns_nameserver', - 'external_network_id': 'external_network_id', - 'cluster_distro': 'ubuntu', - 'coe': 'mesos', - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'registry_enabled': False, - 'server_type': 'vm', - 'volume_driver': 'volume_driver', - 'labels': {'rexray_preempt': 'False', - 'mesos_slave_isolation': - 'docker/runtime,filesystem/linux', - 'mesos_slave_image_providers': 'docker', - 'mesos_slave_executor_env_variables': '{}', - 'mesos_slave_work_dir': '/tmp/mesos/slave' - }, - 'master_lb_enabled': False, - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - } - self.cluster_dict = { - 'id': 1, - 'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', - 'cluster_template_id': 'xx-xx-xx-xx', - 'keypair': 'keypair_id', - 'name': 'cluster1', - 'stack_id': 'xx-xx-xx-xx', - 'api_address': '172.17.2.3', - 'node_addresses': ['172.17.2.4'], - 'node_count': 1, - 'master_count': 1, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', - } - self.context.user_name = 'mesos_user' - self.context.tenant = 'admin' - self.context.domain_name = 'domainname' - osc_patcher = mock.patch('magnum.common.clients.OpenStackClients') - self.mock_osc_class = osc_patcher.start() - self.addCleanup(osc_patcher.stop) - self.mock_osc = mock.MagicMock() - self.mock_osc.cinder_region_name.return_value = 'RegionOne' - self.mock_keystone = mock.MagicMock() - self.mock_keystone.trustee_domain_id = 'trustee_domain_id' - self.mock_osc.keystone.return_value = self.mock_keystone - self.mock_osc_class.return_value = self.mock_osc - self.mock_osc.url_for.return_value = 'http://192.168.10.10:5000/v3' - - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_all_values( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid): - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - cluster = objects.Cluster(self.context, **self.cluster_dict) - mock_driver.return_value = mesos_dr.Driver() - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'dns_nameserver': 'dns_nameserver', - 'server_image': 'image_id', - 'master_flavor': 'master_flavor_id', - 'slave_flavor': 'flavor_id', - 'number_of_slaves': 1, - 'number_of_masters': 1, - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'cluster_name': 'cluster1', - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 
'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': '', - 'volume_driver': 'volume_driver', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'region_name': self.mock_osc.cinder_region_name.return_value, - 'username': 'mesos_user', - 'tenant_name': 'admin', - 'domain_name': 'domainname', - 'rexray_preempt': 'False', - 'mesos_slave_executor_env_variables': '{}', - 'mesos_slave_isolation': 'docker/runtime,filesystem/linux', - 'mesos_slave_work_dir': '/tmp/mesos/slave', - 'mesos_slave_image_providers': 'docker' - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - '../../common/templates/environments/no_master_lb.yaml'], - env_files) - - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_only_required( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid): - not_required = ['image_id', 'master_flavor_id', 'flavor_id', - 'dns_nameserver', 'fixed_network', 'http_proxy', - 'https_proxy', 'no_proxy', 'volume_driver', - 'fixed_subnet'] - for key in not_required: - self.cluster_template_dict[key] = None - - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - cluster = objects.Cluster(self.context, **self.cluster_dict) - mock_driver.return_value = mesos_dr.Driver() - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'number_of_slaves': 1, - 'number_of_masters': 1, - 'cluster_name': 'cluster1', - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': '', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'region_name': self.mock_osc.cinder_region_name.return_value, - 'username': 'mesos_user', - 'tenant_name': 'admin', - 'domain_name': 'domainname', - 'rexray_preempt': 'False', - 'mesos_slave_isolation': 'docker/runtime,filesystem/linux', - 'mesos_slave_executor_env_variables': '{}', - 'mesos_slave_work_dir': '/tmp/mesos/slave', - 'mesos_slave_image_providers': 'docker' - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/with_private_network.yaml', - '../../common/templates/environments/no_master_lb.yaml'], - env_files) - - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_with_lb( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid): - self.cluster_template_dict['master_lb_enabled'] = True - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - cluster = objects.Cluster(self.context, **self.cluster_dict) - mock_driver.return_value = mesos_dr.Driver() - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 
'fixed_subnet': 'fixed_subnet', - 'dns_nameserver': 'dns_nameserver', - 'server_image': 'image_id', - 'master_flavor': 'master_flavor_id', - 'slave_flavor': 'flavor_id', - 'number_of_slaves': 1, - 'number_of_masters': 1, - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'cluster_name': 'cluster1', - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': '', - 'volume_driver': 'volume_driver', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'region_name': self.mock_osc.cinder_region_name.return_value, - 'username': 'mesos_user', - 'tenant_name': 'admin', - 'domain_name': 'domainname', - 'rexray_preempt': 'False', - 'mesos_slave_executor_env_variables': '{}', - 'mesos_slave_isolation': 'docker/runtime,filesystem/linux', - 'mesos_slave_work_dir': '/tmp/mesos/slave', - 'mesos_slave_image_providers': 'docker' - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - '../../common/templates/environments/with_master_lb.yaml'], - env_files) - - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_multi_master( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid): - self.cluster_template_dict['master_lb_enabled'] = True - self.cluster_dict['master_count'] = 2 - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - cluster = objects.Cluster(self.context, **self.cluster_dict) - mock_driver.return_value = mesos_dr.Driver() - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'dns_nameserver': 'dns_nameserver', - 'server_image': 'image_id', - 'master_flavor': 'master_flavor_id', - 'slave_flavor': 'flavor_id', - 'number_of_slaves': 1, - 'number_of_masters': 2, - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'cluster_name': 'cluster1', - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': '', - 'volume_driver': 'volume_driver', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'region_name': self.mock_osc.cinder_region_name.return_value, - 'username': 'mesos_user', - 'tenant_name': 'admin', - 'domain_name': 'domainname', - 'rexray_preempt': 'False', - 'mesos_slave_executor_env_variables': '{}', - 'mesos_slave_isolation': 'docker/runtime,filesystem/linux', - 'mesos_slave_work_dir': '/tmp/mesos/slave', - 'mesos_slave_image_providers': 'docker' - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - '../../common/templates/environments/with_master_lb.yaml'], - env_files) - - @patch('magnum.conductor.utils.retrieve_cluster_template') - @patch('magnum.conf.CONF') - @patch('magnum.common.clients.OpenStackClients') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def setup_poll_test(self, 
mock_driver, mock_openstack_client, mock_conf, - mock_retrieve_cluster_template): - mock_conf.cluster_heat.max_attempts = 10 - - cluster = mock.MagicMock() - mock_heat_stack = mock.MagicMock() - mock_heat_client = mock.MagicMock() - mock_heat_client.stacks.get.return_value = mock_heat_stack - mock_openstack_client.heat.return_value = mock_heat_client - mock_driver.return_value = mesos_dr.Driver() - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_retrieve_cluster_template.return_value = cluster_template - poller = heat_driver.HeatPoller(mock_openstack_client, - mock.MagicMock(), cluster, - mesos_dr.Driver()) - poller.get_version_info = mock.MagicMock() - return (mock_heat_stack, cluster, poller) - - def test_poll_node_count(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.parameters = {'number_of_slaves': 1} - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - poller.poll_and_check() - - self.assertEqual(1, cluster.node_count) - - def test_poll_node_count_by_update(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.parameters = {'number_of_slaves': 2} - mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE - poller.poll_and_check() - - self.assertEqual(2, cluster.node_count) diff --git a/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py deleted file mode 100644 index 1ad919d3..00000000 --- a/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py +++ /dev/null @@ -1,490 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
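The poll tests above only pass if HeatPoller copies the slave count out of the live stack's parameters when poll_and_check() runs; the Swarm tests at the end of the next file exercise the same path with 'number_of_nodes'. A minimal sketch of that synchronization step, assuming hypothetical class and attribute names:

    # Sketch only: mirrors what test_poll_node_count asserts.
    class NodeCountPoller(object):
        def __init__(self, heat_stack, cluster, count_key='number_of_slaves'):
            self.heat_stack = heat_stack
            self.cluster = cluster
            self.count_key = count_key

        def poll_and_check(self):
            # Keep the cluster record in step with the live Heat stack.
            self.cluster.node_count = self.heat_stack.parameters[self.count_key]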
- -import mock -from mock import patch - -import magnum.conf -from magnum.drivers.heat import driver as heat_driver -from magnum.drivers.swarm_fedora_atomic_v1 import driver as swarm_dr -from magnum import objects -from magnum.objects.fields import ClusterStatus as cluster_status -from magnum.tests import base - -CONF = magnum.conf.CONF - - -class TestClusterConductorWithSwarm(base.TestCase): - def setUp(self): - super(TestClusterConductorWithSwarm, self).setUp() - self.cluster_template_dict = { - 'image_id': 'image_id', - 'flavor_id': 'flavor_id', - 'master_flavor_id': 'master_flavor_id', - 'keypair_id': 'keypair_id', - 'dns_nameserver': 'dns_nameserver', - 'docker_volume_size': 20, - 'docker_storage_driver': 'devicemapper', - 'external_network_id': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'cluster_distro': 'fedora-atomic', - 'coe': 'swarm', - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'tls_disabled': False, - 'registry_enabled': False, - 'server_type': 'vm', - 'network_driver': 'network_driver', - 'labels': {'docker_volume_type': 'lvmdriver-1', - 'flannel_network_cidr': '10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'flannel_backend': 'vxlan', - 'rexray_preempt': 'False', - 'swarm_strategy': 'spread'}, - 'master_lb_enabled': False, - 'volume_driver': 'rexray' - } - self.cluster_dict = { - 'id': 1, - 'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', - 'cluster_template_id': 'xx-xx-xx-xx', - 'keypair': 'keypair_id', - 'docker_volume_size': 20, - 'name': 'cluster1', - 'stack_id': 'xx-xx-xx-xx', - 'api_address': '172.17.2.3', - 'node_addresses': ['172.17.2.4'], - 'master_count': 1, - 'node_count': 1, - 'discovery_url': 'https://discovery.test.io/123456789', - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', - 'coe_version': 'fake-version' - } - - # We need this due to volume_driver=rexray - CONF.set_override('cluster_user_trust', - True, - group='trust') - - osc_patcher = mock.patch('magnum.common.clients.OpenStackClients') - self.mock_osc_class = osc_patcher.start() - self.addCleanup(osc_patcher.stop) - self.mock_osc = mock.MagicMock() - self.mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' - self.mock_osc.url_for.return_value = 'http://192.168.10.10:5000/v3' - self.mock_keystone = mock.MagicMock() - self.mock_keystone.trustee_domain_id = 'trustee_domain_id' - self.mock_osc.keystone.return_value = self.mock_keystone - self.mock_osc_class.return_value = self.mock_osc - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_all_values( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"1","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - mock_driver.return_value = swarm_dr.Driver() - cluster = objects.Cluster(self.context, **self.cluster_dict) - - (template_path, - definition, - env_files) = 
mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'dns_nameserver': 'dns_nameserver', - 'server_image': 'image_id', - 'master_flavor': 'master_flavor_id', - 'node_flavor': 'flavor_id', - 'number_of_masters': 1, - 'number_of_nodes': 1, - 'docker_volume_size': 20, - 'docker_storage_driver': 'devicemapper', - 'discovery_url': 'https://discovery.test.io/123456789', - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', - 'magnum_url': self.mock_osc.magnum_url.return_value, - 'tls_disabled': False, - 'registry_enabled': False, - 'network_driver': 'network_driver', - 'flannel_network_cidr': '10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'flannel_backend': 'vxlan', - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'swarm_version': 'fake-version', - 'swarm_strategy': u'spread', - 'volume_driver': 'rexray', - 'rexray_preempt': 'False', - 'docker_volume_type': 'lvmdriver-1' - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - '../../common/templates/environments/with_volume.yaml', - '../../common/templates/environments/no_master_lb.yaml'], - env_files) - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_with_registry( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - self.cluster_template_dict['registry_enabled'] = True - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"1","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - mock_driver.return_value = swarm_dr.Driver() - cluster = objects.Cluster(self.context, **self.cluster_dict) - - CONF.set_override('swift_region', - 'RegionOne', - group='docker_registry') - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'dns_nameserver': 'dns_nameserver', - 'server_image': 'image_id', - 'master_flavor': 'master_flavor_id', - 'node_flavor': 'flavor_id', - 'number_of_masters': 1, - 'number_of_nodes': 1, - 'docker_volume_size': 20, - 'discovery_url': 'https://discovery.test.io/123456789', - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', - 'magnum_url': self.mock_osc.magnum_url.return_value, - 'tls_disabled': False, - 'registry_enabled': True, - 'registry_container': 'docker_registry', - 'swift_region': 'RegionOne', - 'network_driver': 'network_driver', - 'flannel_network_cidr': 
'10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'flannel_backend': 'vxlan', - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'docker_storage_driver': 'devicemapper', - 'swarm_version': 'fake-version', - 'swarm_strategy': u'spread', - 'volume_driver': 'rexray', - 'rexray_preempt': 'False', - 'docker_volume_type': 'lvmdriver-1' - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - '../../common/templates/environments/with_volume.yaml', - '../../common/templates/environments/no_master_lb.yaml'], - env_files) - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_only_required( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - - not_required = ['image_id', 'flavor_id', 'dns_nameserver', - 'docker_volume_size', 'fixed_network', 'http_proxy', - 'https_proxy', 'no_proxy', 'network_driver', - 'master_flavor_id', 'docker_storage_driver', - 'volume_driver', 'rexray_preempt', 'fixed_subnet', - 'docker_volume_type'] - for key in not_required: - self.cluster_template_dict[key] = None - self.cluster_dict['discovery_url'] = 'https://discovery.etcd.io/test' - - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"1","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - mock_driver.return_value = swarm_dr.Driver() - cluster = objects.Cluster(self.context, **self.cluster_dict) - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'number_of_masters': 1, - 'number_of_nodes': 1, - 'discovery_url': 'https://discovery.etcd.io/test', - 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', - 'magnum_url': self.mock_osc.magnum_url.return_value, - 'tls_disabled': False, - 'registry_enabled': False, - 'flannel_network_cidr': u'10.101.0.0/16', - 'flannel_network_subnetlen': u'26', - 'flannel_backend': u'vxlan', - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'swarm_version': 'fake-version', - 'swarm_strategy': u'spread', - 'rexray_preempt': 'False', - 'docker_volume_type': 'lvmdriver-1', - 'docker_volume_size': 20, - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/with_private_network.yaml', - '../../common/templates/environments/with_volume.yaml', - '../../common/templates/environments/no_master_lb.yaml'], - env_files) - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def 
test_extract_template_definition_with_lb( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - self.cluster_template_dict['master_lb_enabled'] = True - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"1","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - mock_driver.return_value = swarm_dr.Driver() - cluster = objects.Cluster(self.context, **self.cluster_dict) - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 'fixed_subnet': 'fixed_subnet', - 'dns_nameserver': 'dns_nameserver', - 'server_image': 'image_id', - 'master_flavor': 'master_flavor_id', - 'node_flavor': 'flavor_id', - 'number_of_masters': 1, - 'number_of_nodes': 1, - 'docker_volume_size': 20, - 'docker_storage_driver': 'devicemapper', - 'discovery_url': 'https://discovery.test.io/123456789', - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', - 'magnum_url': self.mock_osc.magnum_url.return_value, - 'tls_disabled': False, - 'registry_enabled': False, - 'network_driver': 'network_driver', - 'flannel_network_cidr': '10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'flannel_backend': 'vxlan', - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'swarm_version': 'fake-version', - 'swarm_strategy': u'spread', - 'volume_driver': 'rexray', - 'rexray_preempt': 'False', - 'docker_volume_type': 'lvmdriver-1' - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - '../../common/templates/environments/with_volume.yaml', - '../../common/templates/environments/with_master_lb.yaml'], - env_files) - - @patch('requests.get') - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def test_extract_template_definition_multi_master( - self, - mock_driver, - mock_objects_cluster_template_get_by_uuid, - mock_get): - self.cluster_template_dict['master_lb_enabled'] = True - self.cluster_dict['master_count'] = 2 - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_objects_cluster_template_get_by_uuid.return_value = \ - cluster_template - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"2","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - mock_driver.return_value = swarm_dr.Driver() - cluster = objects.Cluster(self.context, **self.cluster_dict) - - (template_path, - definition, - env_files) = mock_driver()._extract_template_definition(self.context, - cluster) - - expected = { - 'ssh_key_name': 'keypair_id', - 'external_network': 'external_network_id', - 'fixed_network': 'fixed_network', - 
'fixed_subnet': 'fixed_subnet', - 'dns_nameserver': 'dns_nameserver', - 'server_image': 'image_id', - 'master_flavor': 'master_flavor_id', - 'node_flavor': 'flavor_id', - 'number_of_masters': 2, - 'number_of_nodes': 1, - 'docker_volume_size': 20, - 'docker_storage_driver': 'devicemapper', - 'discovery_url': 'https://discovery.test.io/123456789', - 'http_proxy': 'http_proxy', - 'https_proxy': 'https_proxy', - 'no_proxy': 'no_proxy', - 'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', - 'magnum_url': self.mock_osc.magnum_url.return_value, - 'tls_disabled': False, - 'registry_enabled': False, - 'network_driver': 'network_driver', - 'flannel_network_cidr': '10.101.0.0/16', - 'flannel_network_subnetlen': '26', - 'flannel_backend': 'vxlan', - 'trustee_domain_id': self.mock_keystone.trustee_domain_id, - 'trustee_username': 'fake_trustee', - 'trustee_password': 'fake_trustee_password', - 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', - 'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de', - 'auth_url': 'http://192.168.10.10:5000/v3', - 'swarm_version': 'fake-version', - 'swarm_strategy': u'spread', - 'volume_driver': 'rexray', - 'rexray_preempt': 'False', - 'docker_volume_type': 'lvmdriver-1' - } - self.assertEqual(expected, definition) - self.assertEqual( - ['../../common/templates/environments/no_private_network.yaml', - '../../common/templates/environments/with_volume.yaml', - '../../common/templates/environments/with_master_lb.yaml'], - env_files) - - @patch('magnum.conductor.utils.retrieve_cluster_template') - @patch('magnum.conf.CONF') - @patch('magnum.common.clients.OpenStackClients') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def setup_poll_test(self, mock_driver, mock_openstack_client, mock_conf, - mock_retrieve_cluster_template): - mock_conf.cluster_heat.max_attempts = 10 - - cluster = mock.MagicMock() - mock_heat_stack = mock.MagicMock() - mock_heat_client = mock.MagicMock() - mock_heat_client.stacks.get.return_value = mock_heat_stack - mock_openstack_client.heat.return_value = mock_heat_client - cluster_template = objects.ClusterTemplate( - self.context, **self.cluster_template_dict) - mock_retrieve_cluster_template.return_value = \ - cluster_template - mock_driver.return_value = swarm_dr.Driver() - poller = heat_driver.HeatPoller(mock_openstack_client, - mock.MagicMock(), cluster, - swarm_dr.Driver()) - poller.get_version_info = mock.MagicMock() - return (mock_heat_stack, cluster, poller) - - def test_poll_node_count(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.parameters = {'number_of_nodes': 1} - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - poller.poll_and_check() - - self.assertEqual(1, cluster.node_count) - - def test_poll_node_count_by_update(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.parameters = {'number_of_nodes': 2} - mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE - poller.poll_and_check() - - self.assertEqual(2, cluster.node_count) diff --git a/magnum/tests/unit/conductor/tasks/__init__.py b/magnum/tests/unit/conductor/tasks/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/conductor/tasks/test_heat_tasks.py b/magnum/tests/unit/conductor/tasks/test_heat_tasks.py deleted file mode 100644 index 973860c9..00000000 --- a/magnum/tests/unit/conductor/tasks/test_heat_tasks.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from taskflow import engines -from taskflow.patterns import linear_flow - -from magnum.conductor.tasks import heat_tasks -from magnum.tests import base - - -class HeatTasksTests(base.TestCase): - - def setUp(self): - super(HeatTasksTests, self).setUp() - self.heat_client = mock.MagicMock(name='heat_client') - - def _get_create_stack_flow(self, heat_client): - flow = linear_flow.Flow("create stack flow") - flow.add( - heat_tasks.CreateStack( - os_client=heat_client, - requires=('stack_name', 'parameters', 'template', 'files'), - provides='new_stack', - ), - ) - return flow - - def _get_update_stack_flow(self, heat_client): - flow = linear_flow.Flow("update stack flow") - flow.add( - heat_tasks.UpdateStack( - os_client=heat_client, - requires=('stack_id', 'parameters', 'template', 'files'), - ), - ) - return flow - - def _get_delete_stack_flow(self, heat_client): - flow = linear_flow.Flow("delete stack flow") - flow.add( - heat_tasks.DeleteStack( - os_client=heat_client, - requires=('stack_id'), - ), - ) - return flow - - def test_create_stack(self): - heat_client = mock.MagicMock(name='heat_client') - stack_id = 'stack_id' - stack_name = 'stack_name' - stack = { - 'stack': { - 'id': stack_id - } - } - heat_client.stacks.create.return_value = stack - flow_store = { - 'stack_name': stack_name, - 'parameters': 'parameters', - 'template': 'template', - 'files': 'files' - } - flow = self._get_create_stack_flow(heat_client) - - result = engines.run(flow, store=flow_store) - heat_client.stacks.create.assert_called_once_with(**flow_store) - self.assertEqual(stack_id, result['new_stack']['stack']['id']) - - def test_create_stack_with_error(self): - heat_client = mock.MagicMock(name='heat_client') - heat_client.stacks.create.side_effect = ValueError - stack_name = 'stack_name' - flow_store = { - 'stack_name': stack_name, - 'parameters': 'parameters', - 'template': 'template', - 'files': 'files' - } - flow = self._get_create_stack_flow(heat_client) - - self.assertRaises(ValueError, engines.run, flow, store=flow_store) - - def test_update_stack(self): - heat_client = mock.MagicMock(name='heat_client') - stack_id = 'stack_id' - flow_store = { - 'stack_id': stack_id, - 'parameters': 'parameters', - 'template': 'template', - 'files': 'files' - } - flow = self._get_update_stack_flow(heat_client) - expected_params = dict(flow_store) - del expected_params['stack_id'] - - engines.run(flow, store=flow_store) - heat_client.stacks.update.assert_called_once_with(stack_id, - **expected_params) - - def test_update_stack_with_error(self): - heat_client = mock.MagicMock(name='heat_client') - heat_client.stacks.update.side_effect = ValueError - stack_id = 'stack_id' - flow_store = { - 'stack_id': stack_id, - 'parameters': 'parameters', - 'template': 'template', - 'files': 'files' - } - flow = self._get_update_stack_flow(heat_client) - - self.assertRaises(ValueError, engines.run, flow, store=flow_store) - - def test_delete_stack(self): - heat_client = 
mock.MagicMock(name='heat_client') - stack_id = 'stack_id' - flow_store = {'stack_id': stack_id} - flow = self._get_delete_stack_flow(heat_client) - - engines.run(flow, store=flow_store) - heat_client.stacks.delete.assert_called_once_with(stack_id) - - def test_delete_stack_with_error(self): - heat_client = mock.MagicMock(name='heat_client') - heat_client.stacks.delete.side_effect = ValueError - stack_id = 'stack_id' - flow_store = {'stack_id': stack_id} - flow = self._get_delete_stack_flow(heat_client) - - self.assertRaises(ValueError, engines.run, flow, store=flow_store) diff --git a/magnum/tests/unit/conductor/test_k8s_api.py b/magnum/tests/unit/conductor/test_k8s_api.py deleted file mode 100644 index 7722c76c..00000000 --- a/magnum/tests/unit/conductor/test_k8s_api.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from magnum.tests import base - - -class TestK8sAPI(base.TestCase): - content_dict = { - 'fake-magnum-cert-ref': { - 'certificate': 'certificate-content', - 'private_key': 'private-key-content', - 'decrypted_private_key': 'private-key-content', - }, - 'fake-ca-cert-ref': { - 'certificate': 'ca-cert-content', - 'private_key': None, - 'decrypted_private_key': None, - } - } - file_dict = { - 'ca-cert-content': mock.MagicMock(), - 'certificate-content': mock.MagicMock(), - 'private-key-content': mock.MagicMock() - } - file_name = { - 'ca-cert-content': 'ca-cert-temp-file-name', - 'certificate-content': 'cert-temp-file-name', - 'private-key-content': 'priv-key-temp-file-name' - } - - def _mock_cert_mgr_get_cert(self, cert_ref, **kwargs): - cert_obj = mock.MagicMock() - cert_obj.get_certificate.return_value = ( - TestK8sAPI.content_dict[cert_ref]['certificate']) - cert_obj.get_private_key.return_value = ( - TestK8sAPI.content_dict[cert_ref]['private_key']) - cert_obj.get_decrypted_private_key.return_value = ( - TestK8sAPI.content_dict[cert_ref]['decrypted_private_key']) - - return cert_obj diff --git a/magnum/tests/unit/conductor/test_monitors.py b/magnum/tests/unit/conductor/test_monitors.py deleted file mode 100644 index 16e69f1a..00000000 --- a/magnum/tests/unit/conductor/test_monitors.py +++ /dev/null @@ -1,334 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
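The heat task tests above drive taskflow for real rather than mocking it: each task declares its inputs via requires (or its execute() signature), publishes a result via provides, and engines.run() wires both to the store dict. A minimal runnable sketch of the same mechanics, with a hypothetical EchoTask standing in for the Heat-backed tasks:

    # Sketch only: 'name' comes from the store; the result lands under 'greeting'.
    from taskflow import engines, task
    from taskflow.patterns import linear_flow

    class EchoTask(task.Task):
        default_provides = 'greeting'

        def execute(self, name):
            return 'hello %s' % name

    flow = linear_flow.Flow('echo flow')
    flow.add(EchoTask())
    result = engines.run(flow, store={'name': 'magnum'})
    assert result['greeting'] == 'hello magnum'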
- -import mock -from oslo_serialization import jsonutils - -from magnum.drivers.common import k8s_monitor -from magnum.drivers.mesos_ubuntu_v1 import monitor as mesos_monitor -from magnum.drivers.swarm_fedora_atomic_v1 import monitor as swarm_monitor -from magnum import objects -from magnum.tests import base -from magnum.tests.unit.db import utils - - -class MonitorsTestCase(base.TestCase): - - test_metrics_spec = { - 'metric1': { - 'unit': 'metric1_unit', - 'func': 'metric1_func', - }, - 'metric2': { - 'unit': 'metric2_unit', - 'func': 'metric2_func', - }, - } - - def setUp(self): - super(MonitorsTestCase, self).setUp() - - cluster = utils.get_test_cluster(node_addresses=['1.2.3.4'], - api_address='https://5.6.7.8:2376', - master_addresses=['10.0.0.6']) - self.cluster = objects.Cluster(self.context, **cluster) - self.monitor = swarm_monitor.SwarmMonitor(self.context, self.cluster) - self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.cluster) - self.mesos_monitor = mesos_monitor.MesosMonitor(self.context, - self.cluster) - p = mock.patch('magnum.drivers.swarm_fedora_atomic_v1.monitor.' - 'SwarmMonitor.metrics_spec', - new_callable=mock.PropertyMock) - self.mock_metrics_spec = p.start() - self.mock_metrics_spec.return_value = self.test_metrics_spec - self.addCleanup(p.stop) - - @mock.patch('magnum.common.docker_utils.docker_for_cluster') - def test_swarm_monitor_pull_data_success(self, mock_docker_cluster): - mock_docker = mock.MagicMock() - mock_docker.info.return_value = {'DriverStatus': [[ - u' \u2514 Reserved Memory', u'0 B / 1 GiB']]} - mock_docker.containers.return_value = [mock.MagicMock()] - mock_docker.inspect_container.return_value = 'test_container' - mock_docker_cluster.return_value.__enter__.return_value = mock_docker - - self.monitor.pull_data() - - self.assertEqual([{'MemTotal': 1073741824.0}], - self.monitor.data['nodes']) - self.assertEqual(['test_container'], self.monitor.data['containers']) - - @mock.patch('magnum.common.docker_utils.docker_for_cluster') - def test_swarm_monitor_pull_data_raise(self, mock_docker_cluster): - mock_container = mock.MagicMock() - mock_docker = mock.MagicMock() - mock_docker.info.return_value = {'DriverStatus': [[ - u' \u2514 Reserved Memory', u'0 B / 1 GiB']]} - mock_docker.containers.return_value = [mock_container] - mock_docker.inspect_container.side_effect = Exception("inspect error") - mock_docker_cluster.return_value.__enter__.return_value = mock_docker - - self.monitor.pull_data() - - self.assertEqual([{'MemTotal': 1073741824.0}], - self.monitor.data['nodes']) - self.assertEqual([mock_container], self.monitor.data['containers']) - - def test_swarm_monitor_get_metric_names(self): - names = self.monitor.get_metric_names() - self.assertEqual(sorted(['metric1', 'metric2']), sorted(names)) - - def test_swarm_monitor_get_metric_unit(self): - unit = self.monitor.get_metric_unit('metric1') - self.assertEqual('metric1_unit', unit) - - def test_swarm_monitor_compute_metric_value(self): - mock_func = mock.MagicMock() - mock_func.return_value = 'metric1_value' - self.monitor.metric1_func = mock_func - value = self.monitor.compute_metric_value('metric1') - self.assertEqual('metric1_value', value) - - def test_swarm_monitor_compute_memory_util(self): - test_data = { - 'nodes': [ - { - 'Name': 'node', - 'MemTotal': 20, - }, - ], - 'containers': [ - { - 'Name': 'container', - 'HostConfig': { - 'Memory': 10, - }, - }, - ], - } - self.monitor.data = test_data - mem_util = self.monitor.compute_memory_util() - self.assertEqual(50, mem_util) - - 
test_data = { - 'nodes': [], - 'containers': [], - } - self.monitor.data = test_data - mem_util = self.monitor.compute_memory_util() - self.assertEqual(0, mem_util) - - @mock.patch('magnum.conductor.k8s_api.create_k8s_api') - def test_k8s_monitor_pull_data_success(self, mock_k8s_api): - mock_nodes = mock.MagicMock() - mock_node = mock.MagicMock() - mock_node.status = mock.MagicMock() - mock_node.status.capacity = {'memory': '2000Ki', 'cpu': '1'} - mock_nodes.items = [mock_node] - mock_k8s_api.return_value.list_node.return_value = ( - mock_nodes) - mock_pods = mock.MagicMock() - mock_pod = mock.MagicMock() - mock_pod.spec = mock.MagicMock() - mock_container = mock.MagicMock() - mock_container.resources = mock.MagicMock() - mock_container.resources.limits = "{'memory': '100Mi', 'cpu': '500m'}" - mock_pod.spec.containers = [mock_container] - mock_pods.items = [mock_pod] - mock_k8s_api.return_value.list_namespaced_pod.return_value = mock_pods - - self.k8s_monitor.pull_data() - self.assertEqual(self.k8s_monitor.data['nodes'], - [{'Memory': 2048000.0, 'Cpu': 1}]) - self.assertEqual(self.k8s_monitor.data['pods'], - [{'Memory': 104857600.0, 'Cpu': 0.5}]) - - def test_k8s_monitor_get_metric_names(self): - k8s_metric_spec = 'magnum.drivers.common.k8s_monitor.K8sMonitor.'\ - 'metrics_spec' - with mock.patch(k8s_metric_spec, - new_callable=mock.PropertyMock) as mock_k8s_metric: - mock_k8s_metric.return_value = self.test_metrics_spec - names = self.k8s_monitor.get_metric_names() - self.assertEqual(sorted(['metric1', 'metric2']), sorted(names)) - - def test_k8s_monitor_get_metric_unit(self): - k8s_metric_spec = 'magnum.drivers.common.k8s_monitor.K8sMonitor.'\ - 'metrics_spec' - with mock.patch(k8s_metric_spec, - new_callable=mock.PropertyMock) as mock_k8s_metric: - mock_k8s_metric.return_value = self.test_metrics_spec - unit = self.k8s_monitor.get_metric_unit('metric1') - self.assertEqual('metric1_unit', unit) - - def test_k8s_monitor_compute_memory_util(self): - test_data = { - 'nodes': [ - { - 'Memory': 20, - }, - ], - 'pods': [ - { - 'Memory': 10, - }, - ], - } - self.k8s_monitor.data = test_data - mem_util = self.k8s_monitor.compute_memory_util() - self.assertEqual(50, mem_util) - - test_data = { - 'nodes': [], - 'pods': [], - } - self.k8s_monitor.data = test_data - mem_util = self.k8s_monitor.compute_memory_util() - self.assertEqual(0, mem_util) - - def test_k8s_monitor_compute_cpu_util(self): - test_data = { - 'nodes': [ - { - 'Cpu': 1, - }, - ], - 'pods': [ - { - 'Cpu': 0.5, - }, - ], - } - self.k8s_monitor.data = test_data - cpu_util = self.k8s_monitor.compute_cpu_util() - self.assertEqual(50, cpu_util) - - test_data = { - 'nodes': [], - 'pods': [], - } - self.k8s_monitor.data = test_data - cpu_util = self.k8s_monitor.compute_cpu_util() - self.assertEqual(0, cpu_util) - - def _test_mesos_monitor_pull_data( - self, mock_url_get, state_json, expected_mem_total, - expected_mem_used, expected_cpu_total, expected_cpu_used): - state_json = jsonutils.dumps(state_json) - mock_url_get.return_value = state_json - self.mesos_monitor.pull_data() - self.assertEqual(self.mesos_monitor.data['mem_total'], - expected_mem_total) - self.assertEqual(self.mesos_monitor.data['mem_used'], - expected_mem_used) - self.assertEqual(self.mesos_monitor.data['cpu_total'], - expected_cpu_total) - self.assertEqual(self.mesos_monitor.data['cpu_used'], - expected_cpu_used) - - @mock.patch('magnum.common.urlfetch.get') - def test_mesos_monitor_pull_data_success(self, mock_url_get): - state_json = { - 'leader': 
'master@10.0.0.6:5050', - 'pid': 'master@10.0.0.6:5050', - 'slaves': [{ - 'resources': { - 'mem': 100, - 'cpus': 1, - }, - 'used_resources': { - 'mem': 50, - 'cpus': 0.2, - } - }] - } - self._test_mesos_monitor_pull_data(mock_url_get, state_json, - 100, 50, 1, 0.2) - - @mock.patch('magnum.common.urlfetch.get') - def test_mesos_monitor_pull_data_success_not_leader(self, mock_url_get): - state_json = { - 'leader': 'master@10.0.0.6:5050', - 'pid': 'master@1.1.1.1:5050', - 'slaves': [] - } - self._test_mesos_monitor_pull_data(mock_url_get, state_json, - 0, 0, 0, 0) - - @mock.patch('magnum.common.urlfetch.get') - def test_mesos_monitor_pull_data_success_no_master(self, mock_url_get): - self.cluster.master_addresses = [] - self._test_mesos_monitor_pull_data(mock_url_get, {}, 0, 0, 0, 0) - - def test_mesos_monitor_get_metric_names(self): - mesos_metric_spec = ('magnum.drivers.mesos_ubuntu_v1.monitor.' - 'MesosMonitor.metrics_spec') - with mock.patch(mesos_metric_spec, - new_callable=mock.PropertyMock) as mock_mesos_metric: - mock_mesos_metric.return_value = self.test_metrics_spec - names = self.mesos_monitor.get_metric_names() - self.assertEqual(sorted(['metric1', 'metric2']), sorted(names)) - - def test_mesos_monitor_get_metric_unit(self): - mesos_metric_spec = ('magnum.drivers.mesos_ubuntu_v1.monitor.' - 'MesosMonitor.metrics_spec') - with mock.patch(mesos_metric_spec, - new_callable=mock.PropertyMock) as mock_mesos_metric: - mock_mesos_metric.return_value = self.test_metrics_spec - unit = self.mesos_monitor.get_metric_unit('metric1') - self.assertEqual('metric1_unit', unit) - - def test_mesos_monitor_compute_memory_util(self): - test_data = { - 'mem_total': 100, - 'mem_used': 50 - } - self.mesos_monitor.data = test_data - mem_util = self.mesos_monitor.compute_memory_util() - self.assertEqual(50, mem_util) - - test_data = { - 'mem_total': 0, - 'pods': 0, - } - self.mesos_monitor.data = test_data - mem_util = self.mesos_monitor.compute_memory_util() - self.assertEqual(0, mem_util) - - test_data = { - 'mem_total': 100, - 'mem_used': 0, - 'pods': 0, - } - self.mesos_monitor.data = test_data - mem_util = self.mesos_monitor.compute_memory_util() - self.assertEqual(0, mem_util) - - def test_mesos_monitor_compute_cpu_util(self): - test_data = { - 'cpu_total': 1, - 'cpu_used': 0.2, - } - self.mesos_monitor.data = test_data - cpu_util = self.mesos_monitor.compute_cpu_util() - self.assertEqual(20, cpu_util) - - test_data = { - 'cpu_total': 100, - 'cpu_used': 0, - } - self.mesos_monitor.data = test_data - cpu_util = self.mesos_monitor.compute_cpu_util() - self.assertEqual(0, cpu_util) diff --git a/magnum/tests/unit/conductor/test_rpcapi.py b/magnum/tests/unit/conductor/test_rpcapi.py deleted file mode 100644 index 620ea617..00000000 --- a/magnum/tests/unit/conductor/test_rpcapi.py +++ /dev/null @@ -1,117 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for :py:class:`magnum.conductor.rpcapi.API`. 
-""" - -import copy - -import mock - -from magnum.conductor import api as conductor_rpcapi -from magnum import objects -from magnum.tests.unit.db import base -from magnum.tests.unit.db import utils as dbutils - - -class RPCAPITestCase(base.DbTestCase): - - def setUp(self): - super(RPCAPITestCase, self).setUp() - self.fake_cluster = dbutils.get_test_cluster(driver='fake-driver') - self.fake_certificate = objects.Certificate.from_db_cluster( - self.fake_cluster) - self.fake_certificate.csr = 'fake-csr' - - def _test_rpcapi(self, method, rpc_method, **kwargs): - rpcapi_cls = kwargs.pop('rpcapi_cls', conductor_rpcapi.API) - rpcapi = rpcapi_cls(topic='fake-topic') - - expected_retval = 'hello world' if rpc_method == 'call' else None - - expected_topic = 'fake-topic' - if 'host' in kwargs: - expected_topic += ".%s" % kwargs['host'] - - target = { - "topic": expected_topic, - "version": kwargs.pop('version', 1.0) - } - expected_msg = copy.deepcopy(kwargs) - - self.fake_args = None - self.fake_kwargs = None - - def _fake_prepare_method(*args, **kwargs): - for kwd in kwargs: - self.assertEqual(target[kwd], kwargs[kwd]) - return rpcapi._client - - def _fake_rpc_method(*args, **kwargs): - self.fake_args = args - self.fake_kwargs = kwargs - if expected_retval: - return expected_retval - - with mock.patch.object(rpcapi._client, "prepare") as mock_prepared: - mock_prepared.side_effect = _fake_prepare_method - - with mock.patch.object(rpcapi._client, rpc_method) as mock_method: - mock_method.side_effect = _fake_rpc_method - retval = getattr(rpcapi, method)(**kwargs) - self.assertEqual(expected_retval, retval) - expected_args = [None, method, expected_msg] - for arg, expected_arg in zip(self.fake_args, expected_args): - self.assertEqual(expected_arg, arg) - - def test_cluster_create(self): - self._test_rpcapi('cluster_create', - 'call', - version='1.0', - cluster=self.fake_cluster, - create_timeout=15) - - def test_cluster_delete(self): - self._test_rpcapi('cluster_delete', - 'call', - version='1.0', - uuid=self.fake_cluster['uuid']) - - self._test_rpcapi('cluster_delete', - 'call', - version='1.1', - uuid=self.fake_cluster['name']) - - def test_cluster_update(self): - self._test_rpcapi('cluster_update', - 'call', - version='1.1', - cluster=self.fake_cluster['name']) - - def test_ping_conductor(self): - self._test_rpcapi('ping_conductor', - 'call', - rpcapi_cls=conductor_rpcapi.ListenerAPI, - version='1.0') - - def test_sign_certificate(self): - self._test_rpcapi('sign_certificate', - 'call', - version='1.0', - cluster=self.fake_cluster, - certificate=self.fake_certificate) - - def test_get_ca_certificate(self): - self._test_rpcapi('get_ca_certificate', - 'call', - version='1.0', - cluster=self.fake_cluster) diff --git a/magnum/tests/unit/conductor/test_scale_manager.py b/magnum/tests/unit/conductor/test_scale_manager.py deleted file mode 100644 index cd647d99..00000000 --- a/magnum/tests/unit/conductor/test_scale_manager.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from magnum.common import exception -from magnum.conductor import scale_manager -from magnum.drivers.common.k8s_scale_manager import K8sScaleManager -from magnum.drivers.mesos_ubuntu_v1.scale_manager import MesosScaleManager -from magnum.tests import base - - -class TestScaleManager(base.TestCase): - - def _test_get_removal_nodes( - self, mock_get_hosts, mock_get_num_of_removal, - mock_is_scale_down, mock_get_by_uuid, is_scale_down, - num_of_removal, all_hosts, container_hosts, - expected_removal_hosts): - - mock_is_scale_down.return_value = is_scale_down - mock_get_num_of_removal.return_value = num_of_removal - - mock_get_hosts.return_value = container_hosts - - mock_heat_output = mock.MagicMock() - mock_heat_output.get_output_value.return_value = all_hosts - - mock_stack = mock.MagicMock() - mock_heat_client = mock.MagicMock() - mock_osc = mock.MagicMock() - mock_heat_client.stacks.get.return_value = mock_stack - mock_osc.heat.return_value = mock_heat_client - - mock_context = mock.MagicMock() - mock_cluster = mock.MagicMock() - scale_mgr = scale_manager.ScaleManager(mock_context, mock_osc, - mock_cluster) - - if expected_removal_hosts is None: - self.assertRaises(exception.MagnumException, - scale_mgr.get_removal_nodes, mock_heat_output) - else: - removal_hosts = scale_mgr.get_removal_nodes(mock_heat_output) - self.assertEqual(expected_removal_hosts, removal_hosts) - if num_of_removal > 0: - mock_get_hosts.assert_called_once_with(mock_context, - mock_cluster) - - @mock.patch('magnum.objects.Cluster.get_by_uuid') - @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' - '_get_num_of_removal') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' - '_get_hosts_with_container') - def test_get_removal_nodes_no_container_host( - self, mock_get_hosts, mock_get_num_of_removal, - mock_is_scale_down, mock_get_by_uuid): - - is_scale_down = True - num_of_removal = 1 - all_hosts = ['10.0.0.3'] - container_hosts = set() - expected_removal_hosts = ['10.0.0.3'] - self._test_get_removal_nodes( - mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, - mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, - container_hosts, expected_removal_hosts) - - @mock.patch('magnum.objects.Cluster.get_by_uuid') - @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' - '_get_num_of_removal') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' - '_get_hosts_with_container') - def test_get_removal_nodes_one_container_host( - self, mock_get_hosts, mock_get_num_of_removal, - mock_is_scale_down, mock_get_by_uuid): - - is_scale_down = True - num_of_removal = 1 - all_hosts = ['10.0.0.3', '10.0.0.4'] - container_hosts = set(['10.0.0.3']) - expected_removal_hosts = ['10.0.0.4'] - self._test_get_removal_nodes( - mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, - mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, - container_hosts, expected_removal_hosts) - - @mock.patch('magnum.objects.Cluster.get_by_uuid') - @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' - '_get_num_of_removal') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' 
- '_get_hosts_with_container') - def test_get_removal_nodes_two_container_hosts( - self, mock_get_hosts, mock_get_num_of_removal, - mock_is_scale_down, mock_get_by_uuid): - - is_scale_down = True - num_of_removal = 1 - all_hosts = ['10.0.0.3', '10.0.0.4'] - container_hosts = set(['10.0.0.3', '10.0.0.4']) - expected_removal_hosts = [] - self._test_get_removal_nodes( - mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, - mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, - container_hosts, expected_removal_hosts) - - @mock.patch('magnum.objects.Cluster.get_by_uuid') - @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' - '_get_num_of_removal') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' - '_get_hosts_with_container') - def test_get_removal_nodes_three_container_hosts( - self, mock_get_hosts, mock_get_num_of_removal, - mock_is_scale_down, mock_get_by_uuid): - - is_scale_down = True - num_of_removal = 1 - all_hosts = ['10.0.0.3', '10.0.0.4'] - container_hosts = set(['10.0.0.3', '10.0.0.4', '10.0.0.5']) - expected_removal_hosts = [] - self._test_get_removal_nodes( - mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, - mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, - container_hosts, expected_removal_hosts) - - @mock.patch('magnum.objects.Cluster.get_by_uuid') - @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' - '_get_num_of_removal') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' - '_get_hosts_with_container') - def test_get_removal_nodes_scale_up( - self, mock_get_hosts, mock_get_num_of_removal, - mock_is_scale_down, mock_get_by_uuid): - - is_scale_down = False - num_of_removal = -1 - all_hosts = ['10.0.0.3', '10.0.0.4'] - container_hosts = set() - expected_removal_hosts = [] - self._test_get_removal_nodes( - mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, - mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, - container_hosts, expected_removal_hosts) - - @mock.patch('magnum.objects.Cluster.get_by_uuid') - @mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' - '_get_num_of_removal') - @mock.patch('magnum.conductor.scale_manager.ScaleManager.' 
- '_get_hosts_with_container') - def test_get_removal_nodes_with_none_hosts( - self, mock_get_hosts, mock_get_num_of_removal, - mock_is_scale_down, mock_get_by_uuid): - - is_scale_down = True - num_of_removal = 1 - all_hosts = None - container_hosts = set() - expected_removal_hosts = None - self._test_get_removal_nodes( - mock_get_hosts, mock_get_num_of_removal, mock_is_scale_down, - mock_get_by_uuid, is_scale_down, num_of_removal, all_hosts, - container_hosts, expected_removal_hosts) - - -class TestK8sScaleManager(base.TestCase): - - @mock.patch('magnum.objects.Cluster.get_by_uuid') - @mock.patch('magnum.conductor.k8s_api.create_k8s_api') - def test_get_hosts_with_container(self, mock_create_api, mock_get): - pods = mock.MagicMock() - pod_1 = mock.MagicMock() - pod_1.spec.node_name = 'node1' - pod_2 = mock.MagicMock() - pod_2.spec.node_name = 'node2' - pods.items = [pod_1, pod_2] - mock_api = mock.MagicMock() - mock_api.list_namespaced_pod.return_value = pods - mock_create_api.return_value = mock_api - - mgr = K8sScaleManager( - mock.MagicMock(), mock.MagicMock(), mock.MagicMock()) - hosts = mgr._get_hosts_with_container( - mock.MagicMock(), mock.MagicMock()) - self.assertEqual(hosts, {'node1', 'node2'}) - - -class TestMesosScaleManager(base.TestCase): - - @mock.patch('magnum.objects.Cluster.get_by_uuid') - @mock.patch('marathon.MarathonClient') - @mock.patch('marathon.MarathonClient.list_tasks') - def test_get_hosts_with_container(self, mock_list_tasks, - mock_client, mock_get): - task_1 = mock.MagicMock() - task_1.host = 'node1' - task_2 = mock.MagicMock() - task_2.host = 'node2' - tasks = [task_1, task_2] - mock_list_tasks.return_value = tasks - - mgr = MesosScaleManager( - mock.MagicMock(), mock.MagicMock(), mock.MagicMock()) - hosts = mgr._get_hosts_with_container( - mock.MagicMock(), mock.MagicMock()) - self.assertEqual(hosts, {'node1', 'node2'}) diff --git a/magnum/tests/unit/conductor/test_utils.py b/magnum/tests/unit/conductor/test_utils.py deleted file mode 100644 index 95e712b4..00000000 --- a/magnum/tests/unit/conductor/test_utils.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright 2015 Huawei Technologies Co.,LTD. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
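Before the conductor-utils tests begin, it is worth pinning down what the TestScaleManager cases above actually encode. The sketch below is a condensed, hypothetical restatement of that behaviour, not the real magnum.conductor.scale_manager code: the NoNodesLeft class and the flattened signature stand in for exception.MagnumException and the manager's internal helpers (_is_scale_down, _get_num_of_removal, _get_hosts_with_container).

class NoNodesLeft(Exception):
    """Stand-in for exception.MagnumException in the real code."""


def get_removal_nodes(heat_output, is_scale_down, num_of_removal,
                      container_hosts):
    # Scale-up, or nothing to remove: no candidates needed
    # (test_get_removal_nodes_scale_up).
    if not is_scale_down or num_of_removal < 1:
        return []

    all_hosts = heat_output.get_output_value()
    if all_hosts is None:
        # test_get_removal_nodes_with_none_hosts expects an exception
        # when the Heat stack output is missing.
        raise NoNodesLeft('no hosts found in the Heat stack output')

    # Prefer hosts that run no containers, and never return more hosts
    # than requested; if every host is busy, return an empty list
    # (the *_container_hosts cases).
    idle = [host for host in all_hosts if host not in container_hosts]
    return idle[:num_of_removal]


class FakeOutput(object):
    def get_output_value(self):
        return ['10.0.0.3', '10.0.0.4']

# Mirrors test_get_removal_nodes_one_container_host.
assert get_removal_nodes(FakeOutput(), True, 1, {'10.0.0.3'}) == ['10.0.0.4']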
- -import mock -from mock import patch - -from magnum.conductor import utils -from magnum import objects -from magnum.tests import base - - -class TestConductorUtils(base.TestCase): - - def _test_retrieve_cluster(self, expected_cluster_uuid, - mock_cluster_get_by_uuid): - expected_context = 'context' - utils.retrieve_cluster(expected_context, expected_cluster_uuid) - mock_cluster_get_by_uuid.assert_called_once_with( - expected_context, expected_cluster_uuid) - - def get_fake_id(self): - return '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' - - def _get_type_uri(self): - return 'service/security/account/user' - - @patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_retrieve_cluster_template(self, - mock_cluster_template_get_by_uuid): - expected_context = 'context' - expected_cluster_template_uuid = 'ClusterTemplate_uuid' - - cluster = objects.Cluster({}) - cluster.cluster_template_id = expected_cluster_template_uuid - - utils.retrieve_cluster_template(expected_context, cluster) - - mock_cluster_template_get_by_uuid.assert_called_once_with( - expected_context, - expected_cluster_template_uuid) - - @patch('oslo_utils.uuidutils.is_uuid_like') - @patch('magnum.objects.Cluster.get_by_name') - def test_retrieve_cluster_uuid_from_name(self, mock_cluster_get_by_name, - mock_uuid_like): - cluster = objects.Cluster(uuid='5d12f6fd-a196-4bf0-ae4c-1f639a523a52') - mock_uuid_like.return_value = False - mock_cluster_get_by_name.return_value = cluster - cluster_uuid = utils.retrieve_cluster_uuid('context', 'fake_name') - self.assertEqual('5d12f6fd-a196-4bf0-ae4c-1f639a523a52', cluster_uuid) - - mock_uuid_like.assert_called_once_with('fake_name') - mock_cluster_get_by_name.assert_called_once_with('context', - 'fake_name') - - @patch('oslo_utils.uuidutils.is_uuid_like') - @patch('magnum.objects.Cluster.get_by_name') - def test_retrieve_cluster_uuid_from_uuid(self, mock_cluster_get_by_name, - mock_uuid_like): - cluster_uuid = utils.retrieve_cluster_uuid( - 'context', - '5d12f6fd-a196-4bf0-ae4c-1f639a523a52') - self.assertEqual('5d12f6fd-a196-4bf0-ae4c-1f639a523a52', cluster_uuid) - mock_uuid_like.return_value = True - mock_cluster_get_by_name.assert_not_called() - - def _get_heat_stacks_get_mock_obj(self, status): - mock_stack = mock.MagicMock() - mock_osc = mock.MagicMock() - mock_stack_obj = mock.MagicMock() - mock_stack_obj.stack_status = status - stack_get = mock.MagicMock() - stack_get.get.return_value = mock_stack_obj - mock_stack.stacks = stack_get - mock_osc.heat.return_value = mock_stack - return mock_osc - - @patch('magnum.conductor.utils.retrieve_cluster') - @patch('magnum.conductor.utils.clients.OpenStackClients') - def test_object_has_stack_invalid_status(self, mock_oscs, - mock_retrieve_cluster): - - mock_osc = self._get_heat_stacks_get_mock_obj("INVALID_STATUS") - mock_oscs.return_value = mock_osc - self.assertTrue(utils.object_has_stack('context', self.get_fake_id())) - mock_retrieve_cluster.assert_called_with('context', self.get_fake_id()) - - @patch('magnum.conductor.utils.retrieve_cluster') - @patch('magnum.conductor.utils.clients.OpenStackClients') - def test_object_has_stack_delete_in_progress(self, mock_oscs, - mock_retrieve_cluster): - - mock_osc = self._get_heat_stacks_get_mock_obj("DELETE_IN_PROGRESS") - mock_oscs.return_value = mock_osc - self.assertFalse(utils.object_has_stack('context', self.get_fake_id())) - mock_retrieve_cluster.assert_called_with('context', self.get_fake_id()) - - @patch('magnum.conductor.utils.retrieve_cluster') - 
@patch('magnum.conductor.utils.clients.OpenStackClients') - def test_object_has_stack_delete_complete_status(self, mock_oscs, - mock_retrieve_cluster): - mock_osc = self._get_heat_stacks_get_mock_obj("DELETE_COMPLETE") - mock_oscs.return_value = mock_osc - self.assertFalse(utils.object_has_stack('context', self.get_fake_id())) - mock_retrieve_cluster.assert_called_with('context', self.get_fake_id()) - - @patch('magnum.objects.Cluster.get_by_uuid') - def test_retrieve_cluster_uuid(self, mock_get_by_uuid): - mock_get_by_uuid.return_value = True - utils.retrieve_cluster('context', - '5d12f6fd-a196-4bf0-ae4c-1f639a523a52') - self.assertTrue(mock_get_by_uuid.called) - - @patch('magnum.objects.Cluster.get_by_name') - def test_retrieve_cluster_name(self, mock_get_by_name): - mock_get_by_name.return_value = mock.MagicMock() - utils.retrieve_cluster('context', '1') - self.assertTrue(mock_get_by_name.called) - - @patch('magnum.conductor.utils.resource.Resource') - def test_get_request_audit_info_with_none_context(self, mock_resource): - mock_resource.return_value = 'resource' - result = utils._get_request_audit_info(context=None) - self.assertTrue(mock_resource.called) - self.assertEqual(result, 'resource') - - def _assert_for_user_project_domain_resource(self, result, ctxt, mock_res): - mock_res.assert_called_once_with(typeURI=self._get_type_uri()) - self.assertEqual(result.user_id, ctxt.user_id) - self.assertEqual(result.project_id, ctxt.project_id) - self.assertEqual(result.domain_id, ctxt.domain_id) - - def _get_context(self, user_id=None, project_id=None, domain_id=None): - context = self.mock_make_context() - context.user_id = user_id - context.project_id = project_id - context.domain_id = domain_id - return context - - @patch('magnum.conductor.utils.resource.Resource') - def test_get_request_audit_info_with_none_userid(self, mock_resource): - context = self._get_context(project_id='test_project_id', - domain_id='test_domain_id') - - mock_resource.return_value = context - result = utils._get_request_audit_info(context) - self._assert_for_user_project_domain_resource(result, context, - mock_resource) - - @patch('magnum.conductor.utils.resource.Resource') - def test_get_request_audit_info_with_none_projectid(self, mock_resource): - context = self._get_context(user_id='test_user_id', - domain_id='test_domain_id') - - mock_resource.return_value = context - result = utils._get_request_audit_info(context) - self._assert_for_user_project_domain_resource(result, context, - mock_resource) - - @patch('magnum.conductor.utils.resource.Resource') - def test_get_request_audit_info_with_none_domainid(self, mock_resource): - context = self._get_context(user_id='test_user_id', - project_id='test_project_id') - - mock_resource.return_value = context - result = utils._get_request_audit_info(context) - self._assert_for_user_project_domain_resource(result, context, - mock_resource) - - @patch('magnum.conductor.utils.resource.Resource') - def test_get_request_audit_info_with_none_domainid_userid(self, - mock_resource): - - context = self._get_context(project_id='test_project_id') - mock_resource.return_value = context - result = utils._get_request_audit_info(context) - self._assert_for_user_project_domain_resource(result, context, - mock_resource) - - @patch('magnum.conductor.utils.resource.Resource') - def test_get_request_audit_info_with_none_userid_projectid(self, - mock_resource): - - context = self._get_context(domain_id='test_domain_id') - mock_resource.return_value = context - result = 
utils._get_request_audit_info(context) - self._assert_for_user_project_domain_resource(result, context, - mock_resource) - - @patch('magnum.conductor.utils.resource.Resource') - def test_get_request_audit_info_with_none_domain_project_id(self, - mock_resource): - - context = self._get_context(user_id='test_user_id') - mock_resource.return_value = context - result = utils._get_request_audit_info(context) - self._assert_for_user_project_domain_resource(result, context, - mock_resource) diff --git a/magnum/tests/unit/conf/__init__.py b/magnum/tests/unit/conf/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/conf/test_conf.py b/magnum/tests/unit/conf/test_conf.py deleted file mode 100644 index 0ee33a84..00000000 --- a/magnum/tests/unit/conf/test_conf.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2016 Fujitsu Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import mock -from oslo_config import cfg -import six - -from magnum.conf import opts -from magnum.tests import base - - -class ConfTestCase(base.TestCase): - - def test_list_opts(self): - for group, opt_list in opts.list_opts(): - if isinstance(group, six.string_types): - self.assertEqual(group, 'DEFAULT') - else: - self.assertIsInstance(group, cfg.OptGroup) - for opt in opt_list: - self.assertIsInstance(opt, cfg.Opt) - - def test_list_module_name_invalid_mods(self): - with mock.patch('pkgutil.iter_modules') as mock_mods: - mock_mods.return_value = [(None, 'foo', True), - (None, 'opts', False)] - self.assertEqual([], opts._list_module_names()) - - def test_list_module_name_valid_mods(self): - with mock.patch('pkgutil.iter_modules') as mock_mods: - mock_mods.return_value = [(None, 'foo', False)] - self.assertEqual(['foo'], opts._list_module_names()) - - def test_import_mods_no_func(self): - modules = ['foo', 'bar'] - with mock.patch('importlib.import_module') as mock_import: - mock_import.return_value = mock.sentinel.mods - self.assertRaises(AttributeError, opts._import_modules, modules) - mock_import.assert_called_once_with('magnum.conf.foo') - - def test_import_mods_valid_func(self): - modules = ['foo', 'bar'] - with mock.patch('importlib.import_module') as mock_import: - mock_mod = mock.MagicMock() - mock_import.return_value = mock_mod - self.assertEqual([mock_mod, mock_mod], - opts._import_modules(modules)) - mock_import.assert_has_calls([mock.call('magnum.conf.foo'), - mock.call('magnum.conf.bar')]) - - def test_append_config(self): - opt = collections.defaultdict(list) - mock_module = mock.MagicMock() - mock_conf = mock.MagicMock() - mock_module.list_opts.return_value = mock_conf - mock_conf.items.return_value = [('foo', 'bar')] - opts._append_config_options([mock_module], opt) - self.assertEqual({'foo': ['b', 'a', 'r']}, opt) diff --git a/magnum/tests/unit/db/__init__.py b/magnum/tests/unit/db/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/db/base.py b/magnum/tests/unit/db/base.py deleted file mode 100644 index 
711d30ca..00000000 --- a/magnum/tests/unit/db/base.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2012 NTT DOCOMO, INC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Magnum DB test base class.""" - -import fixtures - -import magnum.conf -from magnum.db import api as dbapi -from magnum.db.sqlalchemy import api as sqla_api -from magnum.db.sqlalchemy import migration -from magnum.db.sqlalchemy import models -from magnum.tests import base - - -CONF = magnum.conf.CONF - -_DB_CACHE = None - - -class Database(fixtures.Fixture): - - def __init__(self, db_api, db_migrate, sql_connection): - self.sql_connection = sql_connection - - self.engine = db_api.get_engine() - self.engine.dispose() - conn = self.engine.connect() - self.setup_sqlite(db_migrate) - self.post_migrations() - - self._DB = "".join(line for line in conn.connection.iterdump()) - self.engine.dispose() - - def setup_sqlite(self, db_migrate): - if db_migrate.version(): - return - models.Base.metadata.create_all(self.engine) - db_migrate.stamp('head') - - def _setUp(self): - conn = self.engine.connect() - conn.connection.executescript(self._DB) - self.addCleanup(self.engine.dispose) - - def post_migrations(self): - """Any addition steps that are needed outside of the migrations.""" - - -class DbTestCase(base.TestCase): - - def setUp(self): - super(DbTestCase, self).setUp() - - self.dbapi = dbapi.get_instance() - - global _DB_CACHE - if not _DB_CACHE: - _DB_CACHE = Database(sqla_api, migration, - sql_connection=CONF.database.connection) - self.useFixture(_DB_CACHE) diff --git a/magnum/tests/unit/db/sqlalchemy/__init__.py b/magnum/tests/unit/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/db/sqlalchemy/test_types.py b/magnum/tests/unit/db/sqlalchemy/test_types.py deleted file mode 100644 index f0dfe24d..00000000 --- a/magnum/tests/unit/db/sqlalchemy/test_types.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
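The Database fixture in base.py above hides a useful trick: it runs the sqlite schema migration once, snapshots the result with conn.connection.iterdump(), and replays that SQL script before every test instead of re-migrating. Here is a self-contained sketch of the same pattern using only the stdlib sqlite3 module (the fixture itself goes through SQLAlchemy and the migration API):

import sqlite3

# Expensive one-time setup: build the schema, then snapshot it as SQL text,
# exactly as the fixture's "".join(line for line in conn.connection.iterdump()).
seed = sqlite3.connect(':memory:')
seed.execute('CREATE TABLE cluster (id INTEGER PRIMARY KEY, name TEXT)')
seed.execute("INSERT INTO cluster (name) VALUES ('cluster1')")
seed.commit()
snapshot = ''.join(seed.iterdump())
seed.close()

# Cheap per-test setup: replay the snapshot into a fresh database.
for _ in range(2):  # pretend these are two test cases
    db = sqlite3.connect(':memory:')
    db.executescript(snapshot)
    assert db.execute('SELECT name FROM cluster').fetchall() == [('cluster1',)]
    db.close()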
- -"""Tests for custom SQLAlchemy types via Magnum DB.""" - -from oslo_db import exception as db_exc -from oslo_utils import uuidutils - -import magnum.db.sqlalchemy.api as sa_api -from magnum.db.sqlalchemy import models -from magnum.tests.unit.db import base - - -class SqlAlchemyCustomTypesTestCase(base.DbTestCase): - - def test_JSONEncodedDict_default_value(self): - # Create ClusterTemplate w/o labels - cluster_template1_id = uuidutils.generate_uuid() - self.dbapi.create_cluster_template({'uuid': cluster_template1_id}) - cluster_template1 = sa_api.model_query( - models.ClusterTemplate).filter_by(uuid=cluster_template1_id).one() - self.assertEqual({}, cluster_template1.labels) - - # Create ClusterTemplate with labels - cluster_template2_id = uuidutils.generate_uuid() - self.dbapi.create_cluster_template( - {'uuid': cluster_template2_id, 'labels': {'bar': 'foo'}}) - cluster_template2 = sa_api.model_query( - models.ClusterTemplate).filter_by(uuid=cluster_template2_id).one() - self.assertEqual('foo', cluster_template2.labels['bar']) - - def test_JSONEncodedDict_type_check(self): - self.assertRaises(db_exc.DBError, - self.dbapi.create_cluster_template, - {'labels': - ['this is not a dict']}) - - def test_JSONEncodedList_default_value(self): - # Create cluster w/o master_addresses - cluster1_id = uuidutils.generate_uuid() - self.dbapi.create_cluster({'uuid': cluster1_id}) - cluster1 = sa_api.model_query( - models.Cluster).filter_by(uuid=cluster1_id).one() - self.assertEqual([], cluster1.master_addresses) - - # Create cluster with master_addresses - cluster2_id = uuidutils.generate_uuid() - self.dbapi.create_cluster({'uuid': cluster2_id, - 'master_addresses': ['mymaster_address1', - 'mymaster_address2']}) - cluster2 = sa_api.model_query( - models.Cluster).filter_by(uuid=cluster2_id).one() - self.assertEqual(['mymaster_address1', 'mymaster_address2'], - cluster2.master_addresses) - - def test_JSONEncodedList_type_check(self): - self.assertRaises(db_exc.DBError, - self.dbapi.create_cluster, - {'master_addresses': - {'this is not a list': 'test'}}) diff --git a/magnum/tests/unit/db/test_cluster.py b/magnum/tests/unit/db/test_cluster.py deleted file mode 100644 index f0b0561d..00000000 --- a/magnum/tests/unit/db/test_cluster.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for manipulating Clusters via the DB API""" -from oslo_utils import uuidutils -import six - -from magnum.common import context -from magnum.common import exception -from magnum.objects.fields import ClusterStatus as cluster_status -from magnum.tests.unit.db import base -from magnum.tests.unit.db import utils - - -class DbClusterTestCase(base.DbTestCase): - - def test_create_cluster(self): - utils.create_test_cluster() - - def test_create_cluster_nullable_cluster_template_id(self): - utils.create_test_cluster(cluster_template_id=None) - - def test_create_cluster_already_exists(self): - utils.create_test_cluster() - self.assertRaises(exception.ClusterAlreadyExists, - utils.create_test_cluster) - - def test_get_cluster_by_id(self): - cluster = utils.create_test_cluster() - res = self.dbapi.get_cluster_by_id(self.context, cluster.id) - self.assertEqual(cluster.id, res.id) - self.assertEqual(cluster.uuid, res.uuid) - - def test_get_cluster_by_name(self): - cluster = utils.create_test_cluster() - res = self.dbapi.get_cluster_by_name(self.context, cluster.name) - self.assertEqual(cluster.name, res.name) - self.assertEqual(cluster.uuid, res.uuid) - - def test_get_cluster_by_uuid(self): - cluster = utils.create_test_cluster() - res = self.dbapi.get_cluster_by_uuid(self.context, cluster.uuid) - self.assertEqual(cluster.id, res.id) - self.assertEqual(cluster.uuid, res.uuid) - - def test_get_cluster_that_does_not_exist(self): - self.assertRaises(exception.ClusterNotFound, - self.dbapi.get_cluster_by_id, - self.context, 999) - self.assertRaises(exception.ClusterNotFound, - self.dbapi.get_cluster_by_uuid, - self.context, - '12345678-9999-0000-aaaa-123456789012') - self.assertRaises(exception.ClusterNotFound, - self.dbapi.get_cluster_by_name, - self.context, 'not_found') - - def test_get_cluster_by_name_multiple_cluster(self): - utils.create_test_cluster( - id=1, name='clusterone', - uuid=uuidutils.generate_uuid()) - utils.create_test_cluster( - id=2, name='clusterone', - uuid=uuidutils.generate_uuid()) - self.assertRaises(exception.Conflict, - self.dbapi.get_cluster_by_name, - self.context, 'clusterone') - - def test_get_all_cluster_stats(self): - utils.create_test_cluster( - id=1, name='clusterone', - uuid=uuidutils.generate_uuid()) - utils.create_test_cluster( - id=2, name='clustertwo', - uuid=uuidutils.generate_uuid()) - ret = self.dbapi.get_cluster_stats(self.context) - self.assertEqual(ret, (2, 12)) - - def test_get_one_tenant_cluster_stats(self): - utils.create_test_cluster( - id=1, name='clusterone', project_id='proj1', - uuid=uuidutils.generate_uuid()) - utils.create_test_cluster( - id=2, name='clustertwo', project_id='proj2', - uuid=uuidutils.generate_uuid()) - ret = self.dbapi.get_cluster_stats(self.context, 'proj2') - self.assertEqual(ret, (1, 6)) - - def test_get_cluster_list(self): - uuids = [] - for i in range(1, 6): - cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid()) - uuids.append(six.text_type(cluster['uuid'])) - res = self.dbapi.get_cluster_list(self.context) - res_uuids = [r.uuid for r in res] - self.assertEqual(sorted(uuids), sorted(res_uuids)) - - def test_get_cluster_list_sorted(self): - uuids = [] - for _ in range(5): - cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid()) - uuids.append(six.text_type(cluster.uuid)) - res = self.dbapi.get_cluster_list(self.context, sort_key='uuid') - res_uuids = [r.uuid for r in res] - self.assertEqual(sorted(uuids), res_uuids) - - self.assertRaises(exception.InvalidParameterValue, - 
self.dbapi.get_cluster_list, - self.context, - sort_key='foo') - - def test_get_cluster_list_with_filters(self): - ct1 = utils.get_test_cluster_template(id=1, - uuid=uuidutils.generate_uuid()) - ct2 = utils.get_test_cluster_template(id=2, - uuid=uuidutils.generate_uuid()) - self.dbapi.create_cluster_template(ct1) - self.dbapi.create_cluster_template(ct2) - - cluster1 = utils.create_test_cluster( - name='cluster-one', - uuid=uuidutils.generate_uuid(), - cluster_template_id=ct1['uuid'], - status=cluster_status.CREATE_IN_PROGRESS) - cluster2 = utils.create_test_cluster( - name='cluster-two', - uuid=uuidutils.generate_uuid(), - cluster_template_id=ct2['uuid'], - node_count=1, - master_count=1, - status=cluster_status.UPDATE_IN_PROGRESS) - cluster3 = utils.create_test_cluster( - name='cluster-three', - node_count=2, - master_count=5, - status=cluster_status.DELETE_IN_PROGRESS) - - res = self.dbapi.get_cluster_list( - self.context, filters={'cluster_template_id': ct1['uuid']}) - self.assertEqual([cluster1.id], [r.id for r in res]) - - res = self.dbapi.get_cluster_list( - self.context, filters={'cluster_template_id': ct2['uuid']}) - self.assertEqual([cluster2.id], [r.id for r in res]) - - res = self.dbapi.get_cluster_list(self.context, - filters={'name': 'cluster-one'}) - self.assertEqual([cluster1.id], [r.id for r in res]) - - res = self.dbapi.get_cluster_list(self.context, - filters={'name': 'bad-cluster'}) - self.assertEqual([], [r.id for r in res]) - - res = self.dbapi.get_cluster_list(self.context, - filters={'node_count': 3}) - self.assertEqual([cluster1.id], [r.id for r in res]) - - res = self.dbapi.get_cluster_list(self.context, - filters={'node_count': 1}) - self.assertEqual([cluster2.id], [r.id for r in res]) - - res = self.dbapi.get_cluster_list(self.context, - filters={'master_count': 3}) - self.assertEqual([cluster1.id], [r.id for r in res]) - - res = self.dbapi.get_cluster_list(self.context, - filters={'master_count': 1}) - self.assertEqual([cluster2.id], [r.id for r in res]) - - filters = {'status': [cluster_status.CREATE_IN_PROGRESS, - cluster_status.DELETE_IN_PROGRESS]} - res = self.dbapi.get_cluster_list(self.context, - filters=filters) - self.assertEqual([cluster1.id, cluster3.id], [r.id for r in res]) - - def test_get_cluster_list_by_admin_all_tenants(self): - uuids = [] - for i in range(1, 6): - cluster = utils.create_test_cluster( - uuid=uuidutils.generate_uuid(), - project_id=uuidutils.generate_uuid(), - user_id=uuidutils.generate_uuid()) - uuids.append(six.text_type(cluster['uuid'])) - ctx = context.make_admin_context(all_tenants=True) - res = self.dbapi.get_cluster_list(ctx) - res_uuids = [r.uuid for r in res] - self.assertEqual(sorted(uuids), sorted(res_uuids)) - - def test_get_cluster_list_cluster_template_not_exist(self): - utils.create_test_cluster() - self.assertEqual(1, len(self.dbapi.get_cluster_list(self.context))) - res = self.dbapi.get_cluster_list(self.context, filters={ - 'cluster_template_id': uuidutils.generate_uuid()}) - self.assertEqual(0, len(res)) - - def test_destroy_cluster(self): - cluster = utils.create_test_cluster() - self.assertIsNotNone(self.dbapi.get_cluster_by_id(self.context, - cluster.id)) - self.dbapi.destroy_cluster(cluster.id) - self.assertRaises(exception.ClusterNotFound, - self.dbapi.get_cluster_by_id, - self.context, cluster.id) - - def test_destroy_cluster_by_uuid(self): - cluster = utils.create_test_cluster() - self.assertIsNotNone(self.dbapi.get_cluster_by_uuid(self.context, - cluster.uuid)) - self.dbapi.destroy_cluster(cluster.uuid) 
- self.assertRaises(exception.ClusterNotFound, - self.dbapi.get_cluster_by_uuid, self.context, - cluster.uuid) - - def test_destroy_cluster_by_id_that_does_not_exist(self): - self.assertRaises(exception.ClusterNotFound, - self.dbapi.destroy_cluster, - '12345678-9999-0000-aaaa-123456789012') - - def test_destroy_cluster_by_uuid_that_does_not_exist(self): - self.assertRaises(exception.ClusterNotFound, - self.dbapi.destroy_cluster, '999') - - def test_update_cluster(self): - cluster = utils.create_test_cluster() - old_nc = cluster.node_count - new_nc = 5 - self.assertNotEqual(old_nc, new_nc) - res = self.dbapi.update_cluster(cluster.id, {'node_count': new_nc}) - self.assertEqual(new_nc, res.node_count) - - def test_update_cluster_not_found(self): - cluster_uuid = uuidutils.generate_uuid() - self.assertRaises(exception.ClusterNotFound, self.dbapi.update_cluster, - cluster_uuid, {'node_count': 5}) - - def test_update_cluster_uuid(self): - cluster = utils.create_test_cluster() - self.assertRaises(exception.InvalidParameterValue, - self.dbapi.update_cluster, cluster.id, - {'uuid': ''}) diff --git a/magnum/tests/unit/db/test_cluster_template.py b/magnum/tests/unit/db/test_cluster_template.py deleted file mode 100644 index cb88c552..00000000 --- a/magnum/tests/unit/db/test_cluster_template.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
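A quick sanity check on the arithmetic behind test_get_all_cluster_stats and test_get_one_tenant_cluster_stats above: the default test cluster (see get_test_cluster later in this diff) has node_count=3 and master_count=3, so each cluster contributes six nodes; two clusters give (2, 12) and one project's single cluster gives (1, 6). The real get_cluster_stats issues a SQL COUNT/SUM in magnum.db.sqlalchemy.api; this pure-Python restatement only makes the expected tuples obvious:

def cluster_stats(clusters, project_id=None):
    # Count clusters and total nodes (workers plus masters), optionally
    # scoped to one project, matching the two stats tests above.
    if project_id is not None:
        clusters = [c for c in clusters if c['project_id'] == project_id]
    total_nodes = sum(c['node_count'] + c['master_count'] for c in clusters)
    return len(clusters), total_nodes

clusters = [
    {'project_id': 'proj1', 'node_count': 3, 'master_count': 3},
    {'project_id': 'proj2', 'node_count': 3, 'master_count': 3},
]
assert cluster_stats(clusters) == (2, 12)
assert cluster_stats(clusters, 'proj2') == (1, 6)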
- -"""Tests for manipulating ClusterTemplate via the DB API""" -from oslo_utils import uuidutils -import six - -from magnum.common import exception -from magnum.tests.unit.db import base -from magnum.tests.unit.db import utils - - -class DbClusterTemplateTestCase(base.DbTestCase): - - def test_create_cluster_template(self): - utils.create_test_cluster_template() - - def test_get_cluster_template_list(self): - uuids = [] - for i in range(1, 6): - ct = utils.create_test_cluster_template( - id=i, uuid=uuidutils.generate_uuid()) - uuids.append(six.text_type(ct['uuid'])) - res = self.dbapi.get_cluster_template_list(self.context) - res_uuids = [r.uuid for r in res] - self.assertEqual(sorted(uuids), sorted(res_uuids)) - - def test_get_cluster_template_list_sorted(self): - uuids = [] - for _ in range(5): - ct = utils.create_test_cluster_template( - uuid=uuidutils.generate_uuid()) - uuids.append(six.text_type(ct['uuid'])) - res = self.dbapi.get_cluster_template_list(self.context, - sort_key='uuid') - res_uuids = [r.uuid for r in res] - self.assertEqual(sorted(uuids), res_uuids) - - self.assertRaises(exception.InvalidParameterValue, - self.dbapi.get_cluster_template_list, - self.context, - sort_key='foo') - - def test_get_cluster_template_list_with_filters(self): - ct1 = utils.create_test_cluster_template( - id=1, - name='ct-one', - uuid=uuidutils.generate_uuid(), - image_id='image1') - ct2 = utils.create_test_cluster_template( - id=2, - name='ct-two', - uuid=uuidutils.generate_uuid(), - image_id='image2') - - res = self.dbapi.get_cluster_template_list(self.context, - filters={'name': 'ct-one'}) - self.assertEqual([ct1['id']], [r.id for r in res]) - - res = self.dbapi.get_cluster_template_list( - self.context, filters={'name': 'bad-name'}) - self.assertEqual([], [r.id for r in res]) - - res = self.dbapi.get_cluster_template_list( - self.context, filters={'image_id': 'image1'}) - self.assertEqual([ct1['id']], [r.id for r in res]) - - res = self.dbapi.get_cluster_template_list( - self.context, filters={'image_id': 'image2'}) - self.assertEqual([ct2['id']], [r.id for r in res]) - - def test_get_cluster_template_by_id(self): - ct = utils.create_test_cluster_template() - cluster_template = self.dbapi.get_cluster_template_by_id( - self.context, ct['id']) - self.assertEqual(ct['uuid'], cluster_template.uuid) - - def test_get_cluster_template_by_id_public(self): - ct = utils.create_test_cluster_template(user_id='not_me', public=True) - cluster_template = self.dbapi.get_cluster_template_by_id( - self.context, ct['id']) - self.assertEqual(ct['uuid'], cluster_template.uuid) - - def test_get_cluster_template_by_uuid(self): - ct = utils.create_test_cluster_template() - cluster_template = self.dbapi.get_cluster_template_by_uuid( - self.context, ct['uuid']) - self.assertEqual(ct['id'], cluster_template.id) - - def test_get_cluster_template_by_uuid_public(self): - ct = utils.create_test_cluster_template(user_id='not_me', public=True) - cluster_template = self.dbapi.get_cluster_template_by_uuid( - self.context, ct['uuid']) - self.assertEqual(ct['id'], cluster_template.id) - - def test_get_cluster_template_that_does_not_exist(self): - self.assertRaises(exception.ClusterTemplateNotFound, - self.dbapi.get_cluster_template_by_id, - self.context, 666) - - def test_get_cluster_template_by_name(self): - ct = utils.create_test_cluster_template() - res = self.dbapi.get_cluster_template_by_name(self.context, ct['name']) - self.assertEqual(ct['id'], res.id) - self.assertEqual(ct['uuid'], res.uuid) - - def 
test_get_cluster_template_by_name_public(self): - ct = utils.create_test_cluster_template(user_id='not_me', public=True) - res = self.dbapi.get_cluster_template_by_name(self.context, ct['name']) - self.assertEqual(ct['id'], res.id) - self.assertEqual(ct['uuid'], res.uuid) - - def test_get_cluster_template_by_name_multiple_cluster_template(self): - utils.create_test_cluster_template( - id=1, name='ct', - uuid=uuidutils.generate_uuid(), - image_id='image1') - utils.create_test_cluster_template( - id=2, name='ct', - uuid=uuidutils.generate_uuid(), - image_id='image2') - self.assertRaises(exception.Conflict, - self.dbapi.get_cluster_template_by_name, - self.context, 'ct') - - def test_get_cluster_template_by_name_not_found(self): - self.assertRaises(exception.ClusterTemplateNotFound, - self.dbapi.get_cluster_template_by_name, - self.context, 'not_found') - - def test_get_cluster_template_by_uuid_that_does_not_exist(self): - self.assertRaises(exception.ClusterTemplateNotFound, - self.dbapi.get_cluster_template_by_uuid, - self.context, - '12345678-9999-0000-aaaa-123456789012') - - def test_update_cluster_template(self): - ct = utils.create_test_cluster_template() - res = self.dbapi.update_cluster_template(ct['id'], - {'name': 'updated-model'}) - self.assertEqual('updated-model', res.name) - - def test_update_cluster_template_that_does_not_exist(self): - self.assertRaises(exception.ClusterTemplateNotFound, - self.dbapi.update_cluster_template, 666, - {'name': ''}) - - def test_update_cluster_template_uuid(self): - ct = utils.create_test_cluster_template() - self.assertRaises(exception.InvalidParameterValue, - self.dbapi.update_cluster_template, ct['id'], - {'uuid': 'hello'}) - - def test_destroy_cluster_template(self): - ct = utils.create_test_cluster_template() - self.dbapi.destroy_cluster_template(ct['id']) - self.assertRaises(exception.ClusterTemplateNotFound, - self.dbapi.get_cluster_template_by_id, - self.context, ct['id']) - - def test_destroy_cluster_template_by_uuid(self): - uuid = uuidutils.generate_uuid() - utils.create_test_cluster_template(uuid=uuid) - self.assertIsNotNone(self.dbapi.get_cluster_template_by_uuid( - self.context, uuid)) - self.dbapi.destroy_cluster_template(uuid) - self.assertRaises(exception.ClusterTemplateNotFound, - self.dbapi.get_cluster_template_by_uuid, - self.context, uuid) - - def test_destroy_cluster_template_that_does_not_exist(self): - self.assertRaises(exception.ClusterTemplateNotFound, - self.dbapi.destroy_cluster_template, 666) - - def test_destroy_cluster_template_that_referenced_by_clusters(self): - ct = utils.create_test_cluster_template() - cluster = utils.create_test_cluster(cluster_template_id=ct['uuid']) - self.assertEqual(ct['uuid'], cluster.cluster_template_id) - self.assertRaises(exception.ClusterTemplateReferenced, - self.dbapi.destroy_cluster_template, ct['id']) - - def test_create_cluster_template_already_exists(self): - uuid = uuidutils.generate_uuid() - utils.create_test_cluster_template(id=1, uuid=uuid) - self.assertRaises(exception.ClusterTemplateAlreadyExists, - utils.create_test_cluster_template, - id=2, uuid=uuid) diff --git a/magnum/tests/unit/db/test_magnum_service.py b/magnum/tests/unit/db/test_magnum_service.py deleted file mode 100644 index 60c1628d..00000000 --- a/magnum/tests/unit/db/test_magnum_service.py +++ /dev/null @@ -1,100 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for manipulating MagnumService via the DB API""" - -from magnum.common import context # NOQA -from magnum.common import exception -from magnum.tests.unit.db import base -from magnum.tests.unit.db import utils - - -class DbMagnumServiceTestCase(base.DbTestCase): - - def test_create_magnum_service(self): - utils.create_test_magnum_service() - - def test_create_magnum_service_failure_for_dup(self): - utils.create_test_magnum_service() - self.assertRaises(exception.MagnumServiceAlreadyExists, - utils.create_test_magnum_service) - - def test_get_magnum_service_by_host_and_binary(self): - ms = utils.create_test_magnum_service() - res = self.dbapi.get_magnum_service_by_host_and_binary( - ms['host'], ms['binary']) - self.assertEqual(ms.id, res.id) - - def test_get_magnum_service_by_host_and_binary_failure(self): - utils.create_test_magnum_service() - res = self.dbapi.get_magnum_service_by_host_and_binary( - 'fakehost1', 'fake-bin1') - self.assertIsNone(res) - - def test_update_magnum_service(self): - ms = utils.create_test_magnum_service() - d2 = True - update = {'disabled': d2} - ms1 = self.dbapi.update_magnum_service(ms['id'], update) - self.assertEqual(ms['id'], ms1['id']) - self.assertEqual(d2, ms1['disabled']) - res = self.dbapi.get_magnum_service_by_host_and_binary( - 'fakehost', 'fake-bin') - self.assertEqual(ms1['id'], res['id']) - self.assertEqual(d2, res['disabled']) - - def test_update_magnum_service_failure(self): - ms = utils.create_test_magnum_service() - fake_update = {'fake_field': 'fake_value'} - self.assertRaises(exception.MagnumServiceNotFound, - self.dbapi.update_magnum_service, - ms['id'] + 1, fake_update) - - def test_destroy_magnum_service(self): - ms = utils.create_test_magnum_service() - res = self.dbapi.get_magnum_service_by_host_and_binary( - 'fakehost', 'fake-bin') - self.assertEqual(res['id'], ms['id']) - self.dbapi.destroy_magnum_service(ms['id']) - res = self.dbapi.get_magnum_service_by_host_and_binary( - 'fakehost', 'fake-bin') - self.assertIsNone(res) - - def test_destroy_magnum_service_failure(self): - ms = utils.create_test_magnum_service() - self.assertRaises(exception.MagnumServiceNotFound, - self.dbapi.destroy_magnum_service, - ms['id'] + 1) - - def test_get_magnum_service_list(self): - fake_ms_params = { - 'report_count': 1010, - 'host': 'FakeHost', - 'binary': 'FakeBin', - 'disabled': False, - 'disabled_reason': 'FakeReason' - } - utils.create_test_magnum_service(**fake_ms_params) - res = self.dbapi.get_magnum_service_list() - self.assertEqual(1, len(res)) - res = res[0] - for k, v in fake_ms_params.items(): - self.assertEqual(res[k], v) - - fake_ms_params['binary'] = 'FakeBin1' - fake_ms_params['disabled'] = True - utils.create_test_magnum_service(**fake_ms_params) - res = self.dbapi.get_magnum_service_list(disabled=True) - self.assertEqual(1, len(res)) - res = res[0] - for k, v in fake_ms_params.items(): - self.assertEqual(res[k], v) diff --git a/magnum/tests/unit/db/test_quota.py b/magnum/tests/unit/db/test_quota.py deleted file mode 100644 index d599b557..00000000 --- a/magnum/tests/unit/db/test_quota.py +++ /dev/null @@ -1,161 +0,0 
@@ -# Copyright 2016 Yahoo! Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for manipulating Quota via the DB API""" - -from magnum.common import exception -from magnum.tests.unit.db import base -from magnum.tests.unit.db import utils - - -class DbQuotaTestCase(base.DbTestCase): - - def test_create_quota(self): - utils.create_test_quotas() - - def test_create_quota_already_exists(self): - utils.create_test_quotas() - self.assertRaises(exception.QuotaAlreadyExists, - utils.create_test_quotas) - - def test_get_quota_all(self): - q = utils.create_test_quotas() - res = self.dbapi.quota_get_all_by_project_id( - project_id='fake_project') - for r in res: - self.assertEqual(q.id, r.id) - self.assertEqual(q.hard_limit, r.hard_limit) - self.assertEqual(q.project_id, r.project_id) - self.assertEqual(q.resource, r.resource) - - def test_get_quota_by_project_id_resource(self): - q = utils.create_test_quotas(project_id='123', - resource='test-res', - hard_limit=5) - res = self.dbapi.get_quota_by_project_id_resource('123', 'test-res') - self.assertEqual(q.hard_limit, res.hard_limit) - self.assertEqual(q.project_id, res.project_id) - self.assertEqual(q.resource, res.resource) - - def test_get_quota_by_project_id_resource_not_found(self): - utils.create_test_quotas(project_id='123', - resource='test-res', - hard_limit=5) - self.assertRaises(exception.QuotaNotFound, - self.dbapi.get_quota_by_project_id_resource, - project_id='123', - resource='bad-res') - - def test_get_quota_list(self): - project_ids = [] - for i in range(1, 6): - project_id = 'proj-'+str(i) - utils.create_test_quotas(project_id=project_id) - project_ids.append(project_id) - res = self.dbapi.get_quota_list(self.context) - res_proj_ids = [r.project_id for r in res] - self.assertEqual(sorted(project_ids), sorted(res_proj_ids)) - - def test_get_quota_list_sorted(self): - project_ids = [] - for i in range(1, 6): - project_id = 'proj-'+str(i) - utils.create_test_quotas(project_id=project_id) - project_ids.append(project_id) - res = self.dbapi.get_quota_list(self.context, sort_key='project_id') - res_proj_ids = [r.project_id for r in res] - self.assertEqual(sorted(project_ids), res_proj_ids) - - def test_get_quota_list_invalid_sort_key(self): - project_ids = [] - for i in range(1, 6): - project_id = 'proj-'+str(i) - utils.create_test_quotas(project_id=project_id) - project_ids.append(project_id) - - self.assertRaises(exception.InvalidParameterValue, - self.dbapi.get_quota_list, - self.context, - sort_key='invalid') - - def test_get_quota_list_with_filters(self): - quota1 = utils.create_test_quotas(project_id='proj-1', resource='res1') - quota2 = utils.create_test_quotas(project_id='proj-1', resource='res2') - quota3 = utils.create_test_quotas(project_id='proj-2', resource='res1') - - res = self.dbapi.get_quota_list( - self.context, filters={'resource': 'res2'}) - self.assertEqual(quota2.project_id, res[0].project_id) - - res = self.dbapi.get_quota_list( - self.context, filters={'project_id': 'proj-2'}) - 
self.assertEqual(quota3.project_id, res[0].project_id) - - res = self.dbapi.get_quota_list( - self.context, filters={'project_id': 'proj-1'}) - self.assertEqual(sorted([quota1.project_id, quota2.project_id]), - sorted([r.project_id for r in res])) - - def test_update_quota(self): - q = utils.create_test_quotas(hard_limit=5, - project_id='1234', - resource='Cluster') - - res = self.dbapi.get_quota_by_project_id_resource('1234', 'Cluster') - self.assertEqual(q.hard_limit, res.hard_limit) - self.assertEqual(q.project_id, res.project_id) - self.assertEqual(q.resource, res.resource) - quota_dict = {'resource': 'Cluster', 'hard_limit': 15} - self.dbapi.update_quota('1234', quota_dict) - res = self.dbapi.get_quota_by_project_id_resource('1234', 'Cluster') - self.assertEqual(quota_dict['hard_limit'], res.hard_limit) - self.assertEqual(quota_dict['resource'], res.resource) - - def test_update_quota_not_found(self): - utils.create_test_quotas(hard_limit=5, - project_id='1234', - resource='Cluster') - quota_dict = {'resource': 'Cluster', 'hard_limit': 15} - self.assertRaises(exception.QuotaNotFound, - self.dbapi.update_quota, - 'invalid_proj', - quota_dict) - - def test_delete_quota(self): - q = utils.create_test_quotas(project_id='123', - resource='test-res', - hard_limit=5) - res = self.dbapi.get_quota_by_project_id_resource('123', 'test-res') - self.assertEqual(q.hard_limit, res.hard_limit) - self.assertEqual(q.project_id, res.project_id) - self.assertEqual(q.resource, res.resource) - self.dbapi.delete_quota(q.project_id, q.resource) - self.assertRaises(exception.QuotaNotFound, - self.dbapi.get_quota_by_project_id_resource, - project_id='123', - resource='bad-res') - - def test_delete_quota_that_does_not_exist(self): - # Make sure that quota does not exist - self.assertRaises(exception.QuotaNotFound, - self.dbapi.get_quota_by_project_id_resource, - project_id='123', - resource='bad-res') - - # Now try to delete non-existing quota - self.assertRaises(exception.QuotaNotFound, - self.dbapi.delete_quota, - project_id='123', - resource='bad-res') diff --git a/magnum/tests/unit/db/test_x509keypair.py b/magnum/tests/unit/db/test_x509keypair.py deleted file mode 100644 index 99c9f25c..00000000 --- a/magnum/tests/unit/db/test_x509keypair.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2015 NEC Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
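The quota tests above only pin CRUD behaviour; how a hard_limit record is consumed is not shown anywhere in this diff. The following is a speculative sketch of a typical enforcement check, with quota_allows and the lookup callable invented for illustration (magnum's actual enforcement sits in its API layer):

import collections


class QuotaNotFound(Exception):
    """Stand-in for magnum.common.exception.QuotaNotFound."""


def quota_allows(get_quota, project_id, resource, in_use, requested=1):
    # A missing quota row is treated here as "no limit configured".
    try:
        quota = get_quota(project_id, resource)
    except QuotaNotFound:
        return True
    return in_use + requested <= quota.hard_limit


Quota = collections.namedtuple('Quota', 'project_id resource hard_limit')
store = {('1234', 'Cluster'): Quota('1234', 'Cluster', 15)}


def lookup(project_id, resource):
    try:
        return store[(project_id, resource)]
    except KeyError:
        raise QuotaNotFound()

# hard_limit=15 echoes the value used by test_update_quota above.
assert quota_allows(lookup, '1234', 'Cluster', in_use=14)
assert not quota_allows(lookup, '1234', 'Cluster', in_use=15)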
- -"""Tests for manipulating X509KeyPairs via the DB API""" - -from oslo_utils import uuidutils -import six - -from magnum.common import context -from magnum.common import exception -from magnum.tests.unit.db import base -from magnum.tests.unit.db import utils - - -class DbX509KeyPairTestCase(base.DbTestCase): - - def test_create_x509keypair(self): - utils.create_test_x509keypair() - - def test_create_x509keypair_already_exists(self): - utils.create_test_x509keypair() - self.assertRaises(exception.X509KeyPairAlreadyExists, - utils.create_test_x509keypair) - - def test_get_x509keypair_by_id(self): - x509keypair = utils.create_test_x509keypair() - res = self.dbapi.get_x509keypair_by_id(self.context, x509keypair.id) - self.assertEqual(x509keypair.id, res.id) - self.assertEqual(x509keypair.uuid, res.uuid) - - def test_get_x509keypair_by_uuid(self): - x509keypair = utils.create_test_x509keypair() - res = self.dbapi.get_x509keypair_by_uuid(self.context, - x509keypair.uuid) - self.assertEqual(x509keypair.id, res.id) - self.assertEqual(x509keypair.uuid, res.uuid) - - def test_get_x509keypair_that_does_not_exist(self): - self.assertRaises(exception.X509KeyPairNotFound, - self.dbapi.get_x509keypair_by_id, - self.context, 999) - self.assertRaises(exception.X509KeyPairNotFound, - self.dbapi.get_x509keypair_by_uuid, - self.context, - '12345678-9999-0000-aaaa-123456789012') - - def test_get_x509keypair_list(self): - uuids = [] - for i in range(1, 6): - x509keypair = utils.create_test_x509keypair( - uuid=uuidutils.generate_uuid()) - uuids.append(six.text_type(x509keypair['uuid'])) - res = self.dbapi.get_x509keypair_list(self.context) - res_uuids = [r.uuid for r in res] - self.assertEqual(sorted(uuids), sorted(res_uuids)) - - def test_get_x509keypair_list_by_admin_all_tenants(self): - uuids = [] - for i in range(1, 6): - x509keypair = utils.create_test_x509keypair( - uuid=uuidutils.generate_uuid(), - project_id=uuidutils.generate_uuid(), - user_id=uuidutils.generate_uuid()) - uuids.append(six.text_type(x509keypair['uuid'])) - ctx = context.make_admin_context(all_tenants=True) - res = self.dbapi.get_x509keypair_list(ctx) - res_uuids = [r.uuid for r in res] - self.assertEqual(sorted(uuids), sorted(res_uuids)) - - def test_destroy_x509keypair(self): - x509keypair = utils.create_test_x509keypair() - self.assertIsNotNone(self.dbapi.get_x509keypair_by_id( - self.context, x509keypair.id)) - self.dbapi.destroy_x509keypair(x509keypair.id) - self.assertRaises(exception.X509KeyPairNotFound, - self.dbapi.get_x509keypair_by_id, - self.context, x509keypair.id) - - def test_destroy_x509keypair_by_uuid(self): - x509keypair = utils.create_test_x509keypair() - self.assertIsNotNone(self.dbapi.get_x509keypair_by_uuid( - self.context, x509keypair.uuid)) - self.dbapi.destroy_x509keypair(x509keypair.uuid) - self.assertRaises(exception.X509KeyPairNotFound, - self.dbapi.get_x509keypair_by_uuid, self.context, - x509keypair.uuid) - - def test_destroy_x509keypair_that_does_not_exist(self): - self.assertRaises(exception.X509KeyPairNotFound, - self.dbapi.destroy_x509keypair, - '12345678-9999-0000-aaaa-123456789012') diff --git a/magnum/tests/unit/db/utils.py b/magnum/tests/unit/db/utils.py deleted file mode 100644 index c899f42c..00000000 --- a/magnum/tests/unit/db/utils.py +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Magnum test utilities.""" - - -from magnum.db import api as db_api - - -def get_test_cluster_template(**kw): - return { - 'id': kw.get('id', 32), - 'project_id': kw.get('project_id', 'fake_project'), - 'user_id': kw.get('user_id', 'fake_user'), - 'uuid': kw.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'), - 'name': kw.get('name', 'clustermodel1'), - 'image_id': kw.get('image_id', 'ubuntu'), - 'flavor_id': kw.get('flavor_id', 'm1.small'), - 'master_flavor_id': kw.get('master_flavor_id', 'm1.small'), - 'keypair_id': kw.get('keypair_id', 'keypair1'), - 'external_network_id': kw.get('external_network_id', - 'd1f02cfb-d27f-4068-9332-84d907cb0e2e'), - 'fixed_network': kw.get('fixed_network', 'private'), - 'fixed_subnet': kw.get('fixed_network', 'private-subnet'), - 'network_driver': kw.get('network_driver'), - 'volume_driver': kw.get('volume_driver'), - 'dns_nameserver': kw.get('dns_nameserver', '8.8.1.1'), - 'apiserver_port': kw.get('apiserver_port', 8080), - 'docker_volume_size': kw.get('docker_volume_size', 20), - 'docker_storage_driver': kw.get('docker_storage_driver', - 'devicemapper'), - 'cluster_distro': kw.get('cluster_distro', 'fedora-atomic'), - 'coe': kw.get('coe', 'swarm'), - 'created_at': kw.get('created_at'), - 'updated_at': kw.get('updated_at'), - 'labels': kw.get('labels', {'key1': 'val1', 'key2': 'val2'}), - 'http_proxy': kw.get('http_proxy', 'fake_http_proxy'), - 'https_proxy': kw.get('https_proxy', 'fake_https_proxy'), - 'no_proxy': kw.get('no_proxy', 'fake_no_proxy'), - 'registry_enabled': kw.get('registry_enabled', False), - 'tls_disabled': kw.get('tls_disabled', False), - 'public': kw.get('public', False), - 'server_type': kw.get('server_type', 'vm'), - 'insecure_registry': kw.get('insecure_registry', '10.0.0.1:5000'), - 'master_lb_enabled': kw.get('master_lb_enabled', True), - 'floating_ip_enabled': kw.get('floating_ip_enabled', True), - } - - -def create_test_cluster_template(**kw): - """Create and return test ClusterTemplate DB object. - - Function to be used to create test ClusterTemplate objects in the database. - :param kw: kwargs with overriding values for ClusterTemplate's attributes. - :returns: Test ClusterTemplate DB object. 
- """ - cluster_template = get_test_cluster_template(**kw) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kw: - del cluster_template['id'] - dbapi = db_api.get_instance() - return dbapi.create_cluster_template(cluster_template) - - -def get_test_cluster(**kw): - attrs = { - 'id': kw.get('id', 42), - 'uuid': kw.get('uuid', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'), - 'name': kw.get('name', 'cluster1'), - 'discovery_url': kw.get('discovery_url', None), - 'ca_cert_ref': kw.get('ca_cert_ref', None), - 'magnum_cert_ref': kw.get('magnum_cert_ref', None), - 'project_id': kw.get('project_id', 'fake_project'), - 'user_id': kw.get('user_id', 'fake_user'), - 'cluster_template_id': kw.get('cluster_template_id', - 'e74c40e0-d825-11e2-a28f-0800200c9a66'), - 'stack_id': kw.get('stack_id', '047c6319-7abd-4bd9-a033-8c6af0173cd0'), - 'status': kw.get('status', 'CREATE_IN_PROGRESS'), - 'status_reason': kw.get('status_reason', 'Completed successfully'), - 'create_timeout': kw.get('create_timeout', 60), - 'api_address': kw.get('api_address', '172.17.2.3'), - 'node_addresses': kw.get('node_addresses', ['172.17.2.4']), - 'node_count': kw.get('node_count', 3), - 'master_count': kw.get('master_count', 3), - 'master_addresses': kw.get('master_addresses', ['172.17.2.18']), - 'created_at': kw.get('created_at'), - 'updated_at': kw.get('updated_at'), - 'docker_volume_size': kw.get('docker_volume_size'), - } - - # Only add Keystone trusts related attributes on demand since they may - # break other tests. - for attr in ['trustee_username', 'trustee_password', 'trust_id']: - if attr in kw: - attrs[attr] = kw[attr] - - return attrs - - -def create_test_cluster(**kw): - """Create test cluster entry in DB and return Cluster DB object. - - Function to be used to create test Cluster objects in the database. - :param kw: kwargs with overriding values for cluster's attributes. - :returns: Test Cluster DB object. - """ - cluster = get_test_cluster(**kw) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kw: - del cluster['id'] - dbapi = db_api.get_instance() - return dbapi.create_cluster(cluster) - - -def get_test_quota(**kw): - attrs = { - 'id': kw.get('id', 42), - 'project_id': kw.get('project_id', 'fake_project'), - 'resource': kw.get('resource', 'Cluster'), - 'hard_limit': kw.get('hard_limit', 10) - } - - return attrs - - -def create_test_quota(**kw): - """Create test quota entry in DB and return Quota DB object. - - Function to be used to create test Quota objects in the database. - :param kw: kwargs with overriding values for quota's attributes. - :returns: Test Quota DB object. - """ - quota = get_test_quota(**kw) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kw: - del quota['id'] - dbapi = db_api.get_instance() - return dbapi.create_quota(quota) - - -def get_test_x509keypair(**kw): - return { - 'id': kw.get('id', 42), - 'uuid': kw.get('uuid', '72625085-c507-4410-9b28-cd7cf1fbf1ad'), - 'project_id': kw.get('project_id', 'fake_project'), - 'user_id': kw.get('user_id', 'fake_user'), - 'certificate': kw.get('certificate', - 'certificate'), - 'private_key': kw.get('private_key', 'private_key'), - 'private_key_passphrase': kw.get('private_key_passphrase', - 'private_key_passphrase'), - 'intermediates': kw.get('intermediates', 'intermediates'), - 'created_at': kw.get('created_at'), - 'updated_at': kw.get('updated_at'), - } - - -def create_test_x509keypair(**kw): - """Create test x509keypair entry in DB and return X509KeyPair DB object. 
-
-    Function to be used to create test X509KeyPair objects in the database.
-    :param kw: kwargs with overriding values for x509keypair's attributes.
-    :returns: Test X509KeyPair DB object.
-    """
-    x509keypair = get_test_x509keypair(**kw)
-    # Let DB generate ID if it isn't specified explicitly
-    if 'id' not in kw:
-        del x509keypair['id']
-    dbapi = db_api.get_instance()
-    return dbapi.create_x509keypair(x509keypair)
-
-
-def get_test_magnum_service(**kw):
-    return {
-        'id': kw.get('id', 13),
-        'report_count': kw.get('report_count', 13),
-        'host': kw.get('host', 'fakehost'),
-        'binary': kw.get('binary', 'fake-bin'),
-        'disabled': kw.get('disabled', False),
-        'disabled_reason': kw.get('disabled_reason', 'fake-reason'),
-        'forced_down': kw.get('forced_down', False),
-        'last_seen_up': kw.get('last_seen_up'),
-        'created_at': kw.get('created_at'),
-        'updated_at': kw.get('updated_at'),
-    }
-
-
-def create_test_magnum_service(**kw):
-    """Create test magnum_service entry in DB and return magnum_service DB object.
-
-    :param kw: kwargs with overriding values for magnum_service's attributes.
-    :returns: Test magnum_service DB object.
-    """
-    magnum_service = get_test_magnum_service(**kw)
-    # Let DB generate ID if it isn't specified explicitly
-    if 'id' not in kw:
-        del magnum_service['id']
-    dbapi = db_api.get_instance()
-    return dbapi.create_magnum_service(magnum_service)
-
-
-def get_test_quotas(**kw):
-    return {
-        'id': kw.get('id', 18),
-        'project_id': kw.get('project_id', 'fake_project'),
-        'resource': kw.get('resource', 'Cluster'),
-        'hard_limit': kw.get('hard_limit', 10),
-        'created_at': kw.get('created_at'),
-        'updated_at': kw.get('updated_at'),
-    }
-
-
-def create_test_quotas(**kw):
-    """Create test quotas entry in DB and return quotas DB object.
-
-    :param kw: kwargs with overriding values for quota attributes.
-    :returns: Test quotas DB object.
-    """
-    quotas = get_test_quotas(**kw)
-    # Let DB generate ID if it isn't specified explicitly
-    if 'id' not in kw:
-        del quotas['id']
-    dbapi = db_api.get_instance()
-    return dbapi.create_quota(quotas)
diff --git a/magnum/tests/unit/drivers/__init__.py b/magnum/tests/unit/drivers/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/magnum/tests/unit/drivers/test_heat_driver.py b/magnum/tests/unit/drivers/test_heat_driver.py
deleted file mode 100644
index 9e05210b..00000000
--- a/magnum/tests/unit/drivers/test_heat_driver.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
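[editor's note] The tests that follow drive HeatPoller.poll_and_check(), whose
job is to mirror a Heat stack's status onto the cluster record and call
cluster.save() only when something actually changed. A bare-bones sketch of
that shape, assuming duck-typed `stack` and `cluster` objects like the mocks
used below (the real poller also syncs node counts and handles delete
cleanup):

    def poll_and_check(stack, cluster):
        # Only write back on a status transition; test_poll_no_save below
        # asserts zero save() calls when nothing changed.
        if stack.stack_status != cluster.status:
            cluster.status = stack.stack_status
            cluster.status_reason = stack.stack_status_reason
            cluster.save()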
- -import mock -from mock import patch - -import magnum.conf -from magnum.drivers.heat import driver as heat_driver -from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_atomic_dr -from magnum import objects -from magnum.objects.fields import ClusterStatus as cluster_status -from magnum.tests import base -from magnum.tests.unit.db import utils - -CONF = magnum.conf.CONF - - -class TestHeatPoller(base.TestCase): - - @patch('magnum.conductor.utils.retrieve_cluster_template') - @patch('oslo_config.cfg') - @patch('magnum.common.clients.OpenStackClients') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def setup_poll_test(self, mock_driver, mock_openstack_client, cfg, - mock_retrieve_cluster_template): - cfg.CONF.cluster_heat.max_attempts = 10 - - cluster = mock.MagicMock() - cluster_template_dict = utils.get_test_cluster_template( - coe='kubernetes') - mock_heat_stack = mock.MagicMock() - mock_heat_client = mock.MagicMock() - mock_heat_client.stacks.get.return_value = mock_heat_stack - mock_openstack_client.heat.return_value = mock_heat_client - cluster_template = objects.ClusterTemplate(self.context, - **cluster_template_dict) - mock_retrieve_cluster_template.return_value = cluster_template - mock_driver.return_value = k8s_atomic_dr.Driver() - poller = heat_driver.HeatPoller(mock_openstack_client, - mock.MagicMock(), cluster, - k8s_atomic_dr.Driver()) - poller.get_version_info = mock.MagicMock() - return (mock_heat_stack, cluster, poller) - - def test_poll_no_save(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - cluster.status = cluster_status.CREATE_IN_PROGRESS - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - poller.poll_and_check() - self.assertEqual(0, cluster.save.call_count) - - def test_poll_save(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - cluster.status = cluster_status.CREATE_IN_PROGRESS - mock_heat_stack.stack_status = cluster_status.CREATE_FAILED - mock_heat_stack.stack_status_reason = 'Create failed' - self.assertIsNone(poller.poll_and_check()) - - self.assertEqual(2, cluster.save.call_count) - self.assertEqual(cluster_status.CREATE_FAILED, cluster.status) - self.assertEqual('Create failed', cluster.status_reason) - - def test_poll_done(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE - self.assertIsNone(poller.poll_and_check()) - - mock_heat_stack.stack_status = cluster_status.CREATE_FAILED - self.assertIsNone(poller.poll_and_check()) - - def test_poll_done_by_update(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE - mock_heat_stack.parameters = {'number_of_minions': 2} - self.assertIsNone(poller.poll_and_check()) - - self.assertEqual(1, cluster.save.call_count) - self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status) - self.assertEqual(2, cluster.node_count) - - def test_poll_done_by_update_failed(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.UPDATE_FAILED - mock_heat_stack.parameters = {'number_of_minions': 2} - self.assertIsNone(poller.poll_and_check()) - - self.assertEqual(2, cluster.save.call_count) - self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status) - self.assertEqual(2, cluster.node_count) - - def test_poll_done_by_rollback_complete(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - 
mock_heat_stack.stack_status = cluster_status.ROLLBACK_COMPLETE - mock_heat_stack.parameters = {'number_of_minions': 1} - self.assertIsNone(poller.poll_and_check()) - - self.assertEqual(2, cluster.save.call_count) - self.assertEqual(cluster_status.ROLLBACK_COMPLETE, cluster.status) - self.assertEqual(1, cluster.node_count) - - def test_poll_done_by_rollback_failed(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.ROLLBACK_FAILED - mock_heat_stack.parameters = {'number_of_minions': 1} - self.assertIsNone(poller.poll_and_check()) - - self.assertEqual(2, cluster.save.call_count) - self.assertEqual(cluster_status.ROLLBACK_FAILED, cluster.status) - self.assertEqual(1, cluster.node_count) - - def test_poll_destroy(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.DELETE_FAILED - self.assertIsNone(poller.poll_and_check()) - # Destroy method is not called when stack delete failed - self.assertEqual(0, cluster.destroy.call_count) - - mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS - poller.poll_and_check() - self.assertEqual(0, cluster.destroy.call_count) - self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status) - - mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE - self.assertIsNone(poller.poll_and_check()) - # destroy and notifications are handled up the stack now - self.assertEqual(cluster_status.DELETE_COMPLETE, cluster.status) - - def test_poll_node_count(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.parameters = {'number_of_minions': 1} - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - poller.poll_and_check() - - self.assertEqual(1, cluster.node_count) - - def test_poll_node_count_by_update(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.parameters = {'number_of_minions': 2} - mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE - self.assertIsNone(poller.poll_and_check()) - - self.assertEqual(2, cluster.node_count) - - @patch('magnum.drivers.heat.driver.trust_manager') - @patch('magnum.drivers.heat.driver.cert_manager') - def test_delete_complete(self, cert_manager, trust_manager): - mock_heat_stack, cluster, poller = self.setup_poll_test() - poller._delete_complete() - self.assertEqual( - 1, cert_manager.delete_certificates_from_cluster.call_count) - self.assertEqual(1, trust_manager.delete_trustee_and_trust.call_count) - - def test_create_or_complete(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE - mock_heat_stack.stack_status_reason = 'stack complete' - poller._sync_cluster_and_template_status(mock_heat_stack) - self.assertEqual('stack complete', cluster.status_reason) - self.assertEqual(cluster_status.CREATE_COMPLETE, cluster.status) - self.assertEqual(1, cluster.save.call_count) - - def test_sync_cluster_status(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - mock_heat_stack.stack_status_reason = 'stack incomplete' - poller._sync_cluster_status(mock_heat_stack) - self.assertEqual('stack incomplete', cluster.status_reason) - self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status) - - @patch('magnum.drivers.heat.driver.LOG') - def test_cluster_failed(self, logger): - mock_heat_stack, cluster, poller = self.setup_poll_test() - 
poller._sync_cluster_and_template_status(mock_heat_stack) - poller._cluster_failed(mock_heat_stack) - self.assertEqual(1, logger.error.call_count) diff --git a/magnum/tests/unit/drivers/test_template_definition.py b/magnum/tests/unit/drivers/test_template_definition.py deleted file mode 100644 index 890474db..00000000 --- a/magnum/tests/unit/drivers/test_template_definition.py +++ /dev/null @@ -1,1013 +0,0 @@ -# Copyright 2015 Rackspace Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import mock -from neutronclient.common import exceptions as n_exception -import six - -from magnum.common import exception -import magnum.conf -from magnum.drivers.common import driver -from magnum.drivers.heat import template_def as cmn_tdef -from magnum.drivers.k8s_coreos_v1 import driver as k8s_coreos_dr -from magnum.drivers.k8s_coreos_v1 import template_def as k8s_coreos_tdef -from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8sa_dr -from magnum.drivers.k8s_fedora_atomic_v1 import template_def as k8sa_tdef -from magnum.drivers.k8s_fedora_ironic_v1 import driver as k8s_i_dr -from magnum.drivers.k8s_fedora_ironic_v1 import template_def as k8si_tdef -from magnum.drivers.mesos_ubuntu_v1 import driver as mesos_dr -from magnum.drivers.mesos_ubuntu_v1 import template_def as mesos_tdef -from magnum.drivers.swarm_fedora_atomic_v1 import driver as swarm_dr -from magnum.drivers.swarm_fedora_atomic_v1 import template_def as swarm_tdef -from magnum.tests import base - -from requests import exceptions as req_exceptions - -CONF = magnum.conf.CONF - - -class TemplateDefinitionTestCase(base.TestCase): - - @mock.patch.object(driver, 'iter_entry_points') - def test_load_entry_points(self, mock_iter_entry_points): - mock_entry_point = mock.MagicMock() - mock_entry_points = [mock_entry_point] - mock_iter_entry_points.return_value = mock_entry_points.__iter__() - - entry_points = driver.Driver.load_entry_points() - - for (expected_entry_point, - (actual_entry_point, loaded_cls)) in zip(mock_entry_points, - entry_points): - self.assertEqual(expected_entry_point, actual_entry_point) - expected_entry_point.load.assert_called_once_with(require=False) - - @mock.patch('magnum.drivers.common.driver.Driver.get_driver') - def test_get_vm_atomic_kubernetes_definition(self, mock_driver): - mock_driver.return_value = k8sa_dr.Driver() - cluster_driver = driver.Driver.get_driver('vm', - 'fedora-atomic', - 'kubernetes') - definition = cluster_driver.get_template_definition() - - self.assertIsInstance(definition, - k8sa_tdef.AtomicK8sTemplateDefinition) - - @mock.patch('magnum.drivers.common.driver.Driver.get_driver') - def test_get_bm_fedora_kubernetes_ironic_definition(self, mock_driver): - mock_driver.return_value = k8s_i_dr.Driver() - cluster_driver = driver.Driver.get_driver('bm', - 'fedora', - 'kubernetes') - definition = cluster_driver.get_template_definition() - - self.assertIsInstance(definition, - k8si_tdef.FedoraK8sIronicTemplateDefinition) - - 
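[editor's note] The driver-selection tests above and below all revolve around
the (server_type, os, coe) triple. Magnum resolves that triple to a driver via
setuptools entry points (see test_load_entry_points); a simplified dict-based
sketch of the same lookup, with illustrative names that are not Magnum's
actual registry:

    class ClusterTypeNotSupported(Exception):
        """Raised when no driver matches the requested cluster type."""

    _DRIVERS = {
        ('vm', 'fedora-atomic', 'kubernetes'): 'k8s_fedora_atomic_v1',
        ('vm', 'coreos', 'kubernetes'): 'k8s_coreos_v1',
        ('bm', 'fedora', 'kubernetes'): 'k8s_fedora_ironic_v1',
        ('vm', 'fedora-atomic', 'swarm'): 'swarm_fedora_atomic_v1',
        ('vm', 'ubuntu', 'mesos'): 'mesos_ubuntu_v1',
    }

    def get_driver(server_type, os, coe):
        # Exact-match lookup on the triple; anything unregistered is an
        # unsupported cluster type.
        try:
            return _DRIVERS[(server_type, os, coe)]
        except KeyError:
            raise ClusterTypeNotSupported(
                '%s/%s/%s' % (server_type, os, coe))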
@mock.patch('magnum.drivers.common.driver.Driver.get_driver')
-    def test_get_vm_coreos_kubernetes_definition(self, mock_driver):
-        mock_driver.return_value = k8s_coreos_dr.Driver()
-        cluster_driver = driver.Driver.get_driver('vm', 'coreos', 'kubernetes')
-        definition = cluster_driver.get_template_definition()
-
-        self.assertIsInstance(definition,
-                              k8s_coreos_tdef.CoreOSK8sTemplateDefinition)
-
-    @mock.patch('magnum.drivers.common.driver.Driver.get_driver')
-    def test_get_vm_atomic_swarm_definition(self, mock_driver):
-        mock_driver.return_value = swarm_dr.Driver()
-        cluster_driver = driver.Driver.get_driver('vm',
-                                                  'fedora-atomic',
-                                                  'swarm')
-        definition = cluster_driver.get_template_definition()
-
-        self.assertIsInstance(definition,
-                              swarm_tdef.AtomicSwarmTemplateDefinition)
-
-    @mock.patch('magnum.drivers.common.driver.Driver.get_driver')
-    def test_get_vm_ubuntu_mesos_definition(self, mock_driver):
-        mock_driver.return_value = mesos_dr.Driver()
-        cluster_driver = driver.Driver.get_driver('vm',
-                                                  'ubuntu',
-                                                  'mesos')
-        definition = cluster_driver.get_template_definition()
-
-        self.assertIsInstance(definition,
-                              mesos_tdef.UbuntuMesosTemplateDefinition)
-
-    def test_get_driver_not_supported(self):
-        self.assertRaises(exception.ClusterTypeNotSupported,
-                          driver.Driver.get_driver,
-                          'vm', 'not_supported', 'kubernetes')
-
-    def test_required_param_not_set(self):
-        param = cmn_tdef.ParameterMapping('test', cluster_template_attr='test',
-                                          required=True)
-        mock_cluster_template = mock.MagicMock()
-        mock_cluster_template.test = None
-
-        self.assertRaises(exception.RequiredParameterNotProvided,
-                          param.set_param, {}, mock_cluster_template, None)
-
-    def test_output_mapping(self):
-        heat_outputs = [
-            {
-                "output_value": "value1",
-                "description": "No description given",
-                "output_key": "key1"
-            },
-            {
-                "output_value": ["value2", "value3"],
-                "description": "No description given",
-                "output_key": "key2"
-            }
-        ]
-
-        mock_stack = mock.MagicMock()
-        mock_stack.to_dict.return_value = {'outputs': heat_outputs}
-
-        output = cmn_tdef.OutputMapping('key1')
-        value = output.get_output_value(mock_stack)
-        self.assertEqual('value1', value)
-
-        output = cmn_tdef.OutputMapping('key2')
-        value = output.get_output_value(mock_stack)
-        self.assertEqual(["value2", "value3"], value)
-
-        output = cmn_tdef.OutputMapping('key3')
-        value = output.get_output_value(mock_stack)
-        self.assertIsNone(value)
-
-        # verify stack with no 'outputs' attribute
-        mock_stack.to_dict.return_value = {}
-        output = cmn_tdef.OutputMapping('key1')
-        value = output.get_output_value(mock_stack)
-        self.assertIsNone(value)
-
-    def test_add_output_with_mapping_type(self):
-        definition = k8sa_dr.Driver().get_template_definition()
-
-        mock_args = [1, 3, 4]
-        mock_kwargs = {'test': 'test'}
-        mock_mapping_type = mock.MagicMock()
-        mock_mapping_type.return_value = mock.MagicMock()
-        definition.add_output(mapping_type=mock_mapping_type, *mock_args,
-                              **mock_kwargs)
-
-        mock_mapping_type.assert_called_once_with(*mock_args, **mock_kwargs)
-        self.assertIn(mock_mapping_type.return_value,
-                      definition.output_mappings)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseTemplateDefinitionTestCase(base.TestCase):
-
-    @abc.abstractmethod
-    def get_definition(self):
-        """Returns the template definition."""
-        pass
-
-    def _test_update_outputs_server_address(
-        self,
-        floating_ip_enabled=True,
-        public_ip_output_key='kube_masters',
-        private_ip_output_key='kube_masters_private',
-        cluster_attr='master_addresses',
-    ):
-
-        definition = self.get_definition()
-
expected_address = expected_public_address = ['public'] - expected_private_address = ['private'] - if not floating_ip_enabled: - expected_address = expected_private_address - - outputs = [ - {"output_value": expected_public_address, - "description": "No description given", - "output_key": public_ip_output_key}, - {"output_value": expected_private_address, - "description": "No description given", - "output_key": private_ip_output_key}, - ] - mock_stack = mock.MagicMock() - mock_stack.to_dict.return_value = {'outputs': outputs} - mock_cluster = mock.MagicMock() - mock_cluster_template = mock.MagicMock() - mock_cluster_template.floating_ip_enabled = floating_ip_enabled - - definition.update_outputs(mock_stack, mock_cluster_template, - mock_cluster) - - self.assertEqual(expected_address, getattr(mock_cluster, cluster_attr)) - - -class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase): - - def get_definition(self): - return k8sa_dr.Driver().get_template_definition() - - @mock.patch('magnum.common.clients.OpenStackClients') - @mock.patch('magnum.drivers.k8s_fedora_atomic_v1.template_def' - '.AtomicK8sTemplateDefinition.get_discovery_url') - @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' - '.get_params') - @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' - '.get_output') - def test_k8s_get_params(self, mock_get_output, mock_get_params, - mock_get_discovery_url, mock_osc_class): - mock_context = mock.MagicMock() - mock_context.auth_token = 'AUTH_TOKEN' - mock_cluster_template = mock.MagicMock() - mock_cluster_template.tls_disabled = False - mock_cluster_template.registry_enabled = False - mock_cluster = mock.MagicMock() - mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' - del mock_cluster.stack_id - mock_scale_manager = mock.MagicMock() - mock_osc = mock.MagicMock() - mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' - mock_osc.cinder_region_name.return_value = 'RegionOne' - mock_osc_class.return_value = mock_osc - - removal_nodes = ['node1', 'node2'] - mock_scale_manager.get_removal_nodes.return_value = removal_nodes - mock_get_discovery_url.return_value = 'fake_discovery_url' - - mock_context.auth_url = 'http://192.168.10.10:5000/v3' - mock_context.user_name = 'fake_user' - mock_context.tenant = 'fake_tenant' - - flannel_cidr = mock_cluster_template.labels.get('flannel_network_cidr') - flannel_subnet = mock_cluster_template.labels.get( - 'flannel_network_subnetlen') - flannel_backend = mock_cluster_template.labels.get('flannel_backend') - system_pods_initial_delay = mock_cluster_template.labels.get( - 'system_pods_initial_delay') - system_pods_timeout = mock_cluster_template.labels.get( - 'system_pods_timeout') - admission_control_list = mock_cluster_template.labels.get( - 'admission_control_list') - prometheus_monitoring = mock_cluster_template.labels.get( - 'prometheus_monitoring') - grafana_admin_passwd = mock_cluster_template.labels.get( - 'grafana_admin_passwd') - kube_dashboard_enabled = mock_cluster_template.labels.get( - 'kube_dashboard_enabled') - docker_volume_type = mock_cluster_template.labels.get( - 'docker_volume_type') - etcd_volume_size = mock_cluster_template.labels.get( - 'etcd_volume_size') - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - - k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster, - scale_manager=mock_scale_manager) - - expected_kwargs = {'extra_params': { - 'minions_to_remove': removal_nodes, - 'discovery_url': 'fake_discovery_url', - 'flannel_network_cidr': 
flannel_cidr, - 'flannel_network_subnetlen': flannel_subnet, - 'flannel_backend': flannel_backend, - 'system_pods_initial_delay': system_pods_initial_delay, - 'system_pods_timeout': system_pods_timeout, - 'admission_control_list': admission_control_list, - 'prometheus_monitoring': prometheus_monitoring, - 'grafana_admin_passwd': grafana_admin_passwd, - 'kube_dashboard_enabled': kube_dashboard_enabled, - 'docker_volume_type': docker_volume_type, - 'etcd_volume_size': etcd_volume_size, - 'username': 'fake_user', - 'tenant_name': 'fake_tenant', - 'magnum_url': mock_osc.magnum_url.return_value, - 'region_name': mock_osc.cinder_region_name.return_value}} - mock_get_params.assert_called_once_with(mock_context, - mock_cluster_template, - mock_cluster, - **expected_kwargs) - - @mock.patch('magnum.common.clients.OpenStackClients') - @mock.patch('magnum.drivers.heat.template_def' - '.BaseTemplateDefinition.get_discovery_url') - @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' - '.get_params') - @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' - '.get_output') - def test_k8s_get_params_insecure(self, mock_get_output, mock_get_params, - mock_get_discovery_url, mock_osc_class): - mock_context = mock.MagicMock() - mock_context.auth_token = 'AUTH_TOKEN' - mock_cluster_template = mock.MagicMock() - mock_cluster_template.tls_disabled = True - mock_cluster_template.registry_enabled = False - mock_cluster = mock.MagicMock() - mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' - del mock_cluster.stack_id - mock_scale_manager = mock.MagicMock() - mock_osc = mock.MagicMock() - mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' - mock_osc.cinder_region_name.return_value - mock_osc_class.return_value = mock_osc - - removal_nodes = ['node1', 'node2'] - mock_scale_manager.get_removal_nodes.return_value = removal_nodes - mock_get_discovery_url.return_value = 'fake_discovery_url' - - mock_context.auth_url = 'http://192.168.10.10:5000/v3' - mock_context.user_name = 'fake_user' - mock_context.tenant = 'fake_tenant' - - flannel_cidr = mock_cluster_template.labels.get('flannel_network_cidr') - flannel_subnet = mock_cluster_template.labels.get( - 'flannel_network_subnetlen') - flannel_backend = mock_cluster_template.labels.get('flannel_backend') - system_pods_initial_delay = mock_cluster_template.labels.get( - 'system_pods_initial_delay') - system_pods_timeout = mock_cluster_template.labels.get( - 'system_pods_timeout') - admission_control_list = mock_cluster_template.labels.get( - 'admission_control_list') - prometheus_monitoring = mock_cluster_template.labels.get( - 'prometheus_monitoring') - grafana_admin_passwd = mock_cluster_template.labels.get( - 'grafana_admin_passwd') - kube_dashboard_enabled = mock_cluster_template.labels.get( - 'kube_dashboard_enabled') - docker_volume_type = mock_cluster_template.labels.get( - 'docker_volume_type') - etcd_volume_size = mock_cluster_template.labels.get( - 'etcd_volume_size') - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - - k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster, - scale_manager=mock_scale_manager) - - expected_kwargs = {'extra_params': { - 'minions_to_remove': removal_nodes, - 'discovery_url': 'fake_discovery_url', - 'flannel_network_cidr': flannel_cidr, - 'flannel_network_subnetlen': flannel_subnet, - 'flannel_backend': flannel_backend, - 'system_pods_initial_delay': system_pods_initial_delay, - 'system_pods_timeout': system_pods_timeout, - 'admission_control_list': 
admission_control_list, - 'prometheus_monitoring': prometheus_monitoring, - 'grafana_admin_passwd': grafana_admin_passwd, - 'kube_dashboard_enabled': kube_dashboard_enabled, - 'docker_volume_type': docker_volume_type, - 'etcd_volume_size': etcd_volume_size, - 'username': 'fake_user', - 'tenant_name': 'fake_tenant', - 'magnum_url': mock_osc.magnum_url.return_value, - 'region_name': mock_osc.cinder_region_name.return_value, - 'loadbalancing_protocol': 'HTTP', - 'kubernetes_port': 8080}} - mock_get_params.assert_called_once_with(mock_context, - mock_cluster_template, - mock_cluster, - **expected_kwargs) - - @mock.patch('requests.get') - def test_k8s_validate_discovery_url(self, mock_get): - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"1","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - k8s_def.validate_discovery_url('http://etcd/test', 1) - - @mock.patch('requests.get') - def test_k8s_validate_discovery_url_fail(self, mock_get): - mock_get.side_effect = req_exceptions.RequestException() - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - self.assertRaises(exception.GetClusterSizeFailed, - k8s_def.validate_discovery_url, - 'http://etcd/test', 1) - - @mock.patch('requests.get') - def test_k8s_validate_discovery_url_invalid(self, mock_get): - mock_resp = mock.MagicMock() - mock_resp.text = str('{"action":"get"}') - mock_get.return_value = mock_resp - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - self.assertRaises(exception.InvalidClusterDiscoveryURL, - k8s_def.validate_discovery_url, - 'http://etcd/test', 1) - - @mock.patch('requests.get') - def test_k8s_validate_discovery_url_unexpect_size(self, mock_get): - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"1","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - self.assertRaises(exception.InvalidClusterSize, - k8s_def.validate_discovery_url, - 'http://etcd/test', 5) - - @mock.patch('requests.get') - def test_k8s_get_discovery_url(self, mock_get): - CONF.set_override('etcd_discovery_service_endpoint_format', - 'http://etcd/test?size=%(size)d', - group='cluster') - expected_discovery_url = 'http://etcd/token' - mock_resp = mock.MagicMock() - mock_resp.text = expected_discovery_url - mock_get.return_value = mock_resp - mock_cluster = mock.MagicMock() - mock_cluster.master_count = 10 - mock_cluster.discovery_url = None - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - discovery_url = k8s_def.get_discovery_url(mock_cluster) - - mock_get.assert_called_once_with('http://etcd/test?size=10') - self.assertEqual(expected_discovery_url, mock_cluster.discovery_url) - self.assertEqual(expected_discovery_url, discovery_url) - - @mock.patch('requests.get') - def test_k8s_get_discovery_url_fail(self, mock_get): - CONF.set_override('etcd_discovery_service_endpoint_format', - 'http://etcd/test?size=%(size)d', - group='cluster') - mock_get.side_effect = req_exceptions.RequestException() - mock_cluster = mock.MagicMock() - mock_cluster.master_count = 10 - mock_cluster.discovery_url = None - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - - self.assertRaises(exception.GetDiscoveryUrlFailed, - k8s_def.get_discovery_url, mock_cluster) - - def test_k8s_get_heat_param(self): - k8s_def = 
k8sa_tdef.AtomicK8sTemplateDefinition() - - heat_param = k8s_def.get_heat_param(cluster_attr='node_count') - self.assertEqual('number_of_minions', heat_param) - - @mock.patch('requests.get') - def test_k8s_get_discovery_url_not_found(self, mock_get): - mock_resp = mock.MagicMock() - mock_resp.text = '' - mock_get.return_value = mock_resp - - fake_cluster = mock.MagicMock() - fake_cluster.discovery_url = None - - self.assertRaises( - exception.InvalidDiscoveryURL, - k8sa_tdef.AtomicK8sTemplateDefinition().get_discovery_url, - fake_cluster) - - def _test_update_outputs_api_address(self, template_definition, - params, tls=True): - - expected_api_address = '%(protocol)s://%(address)s:%(port)s' % params - - outputs = [ - {"output_value": params['address'], - "description": "No description given", - "output_key": 'api_address'}, - ] - mock_stack = mock.MagicMock() - mock_stack.to_dict.return_value = {'outputs': outputs} - mock_cluster = mock.MagicMock() - mock_cluster_template = mock.MagicMock() - mock_cluster_template.tls_disabled = tls - - template_definition.update_outputs(mock_stack, mock_cluster_template, - mock_cluster) - - self.assertEqual(expected_api_address, mock_cluster.api_address) - - def test_update_k8s_outputs_api_address(self): - address = 'updated_address' - protocol = 'http' - port = '8080' - params = { - 'protocol': protocol, - 'address': address, - 'port': port, - } - - template_definition = k8sa_tdef.AtomicK8sTemplateDefinition() - self._test_update_outputs_api_address(template_definition, params) - - def test_update_swarm_outputs_api_address(self): - address = 'updated_address' - protocol = 'tcp' - port = '2376' - params = { - 'protocol': protocol, - 'address': address, - 'port': port, - } - - template_definition = swarm_tdef.AtomicSwarmTemplateDefinition() - self._test_update_outputs_api_address(template_definition, params) - - def test_update_k8s_outputs_if_cluster_template_is_secure(self): - address = 'updated_address' - protocol = 'https' - port = '6443' - params = { - 'protocol': protocol, - 'address': address, - 'port': port, - } - template_definition = k8sa_tdef.AtomicK8sTemplateDefinition() - self._test_update_outputs_api_address(template_definition, params, - tls=False) - - def test_update_swarm_outputs_if_cluster_template_is_secure(self): - address = 'updated_address' - protocol = 'tcp' - port = '2376' - params = { - 'protocol': protocol, - 'address': address, - 'port': port, - } - - template_definition = swarm_tdef.AtomicSwarmTemplateDefinition() - self._test_update_outputs_api_address(template_definition, params, - tls=False) - - def _test_update_outputs_none_api_address(self, template_definition, - params, tls=True): - - outputs = [ - {"output_value": params['address'], - "description": "No description given", - "output_key": 'api_address'}, - ] - mock_stack = mock.MagicMock() - mock_stack.to_dict.return_value = {'outputs': outputs} - mock_cluster = mock.MagicMock() - mock_cluster.api_address = 'none_api_address' - mock_cluster_template = mock.MagicMock() - mock_cluster_template.tls_disabled = tls - - template_definition.update_outputs(mock_stack, mock_cluster_template, - mock_cluster) - - self.assertEqual('none_api_address', mock_cluster.api_address) - - def test_update_k8s_outputs_none_api_address(self): - protocol = 'http' - port = '8080' - params = { - 'protocol': protocol, - 'address': None, - 'port': port, - } - - template_definition = k8sa_tdef.AtomicK8sTemplateDefinition() - self._test_update_outputs_none_api_address(template_definition, params) - - def 
test_update_swarm_outputs_none_api_address(self):
-        protocol = 'tcp'
-        port = '2376'
-        params = {
-            'protocol': protocol,
-            'address': None,
-            'port': port,
-        }
-        template_definition = swarm_tdef.AtomicSwarmTemplateDefinition()
-        self._test_update_outputs_none_api_address(template_definition,
-                                                   params)
-
-    def test_update_outputs_master_address(self):
-        self._test_update_outputs_server_address(
-            public_ip_output_key='kube_masters',
-            private_ip_output_key='kube_masters_private',
-            cluster_attr='master_addresses',
-        )
-
-    def test_update_outputs_node_address(self):
-        self._test_update_outputs_server_address(
-            public_ip_output_key='kube_minions',
-            private_ip_output_key='kube_minions_private',
-            cluster_attr='node_addresses',
-        )
-
-    def test_update_outputs_master_address_fip_disabled(self):
-        self._test_update_outputs_server_address(
-            floating_ip_enabled=False,
-            public_ip_output_key='kube_masters',
-            private_ip_output_key='kube_masters_private',
-            cluster_attr='master_addresses',
-        )
-
-    def test_update_outputs_node_address_fip_disabled(self):
-        self._test_update_outputs_server_address(
-            floating_ip_enabled=False,
-            public_ip_output_key='kube_minions',
-            private_ip_output_key='kube_minions_private',
-            cluster_attr='node_addresses',
-        )
-
-
-class FedoraK8sIronicTemplateDefinitionTestCase(base.TestCase):
-
-    def get_definition(self):
-        return k8s_i_dr.Driver().get_template_definition()
-
-    def assert_neutron_find(self, mock_neutron_v20_find,
-                            osc, cluster_template):
-        mock_neutron_v20_find.assert_called_once_with(
-            osc.neutron(),
-            'subnet',
-            cluster_template.fixed_subnet
-        )
-
-    def assert_raises_from_get_fixed_network_id(
-        self,
-        mock_neutron_v20_find,
-        exception_from_neutron_client,
-        expected_exception_class
-    ):
-        definition = self.get_definition()
-        osc = mock.MagicMock()
-        cluster_template = mock.MagicMock()
-        mock_neutron_v20_find.side_effect = exception_from_neutron_client
-
-        self.assertRaises(
-            expected_exception_class,
-            definition.get_fixed_network_id,
-            osc,
-            cluster_template
-        )
-
-    @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
-    def test_get_fixed_network_id(self, mock_neutron_v20_find):
-        expected_network_id = 'expected_network_id'
-
-        osc = mock.MagicMock()
-        cluster_template = mock.MagicMock()
-        definition = self.get_definition()
-        mock_neutron_v20_find.return_value = {
-            'ip_version': 4,
-            'network_id': expected_network_id,
-        }
-
-        self.assertEqual(
-            expected_network_id,
-            definition.get_fixed_network_id(osc, cluster_template)
-        )
-        self.assert_neutron_find(mock_neutron_v20_find, osc, cluster_template)
-
-    @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
-    def test_get_fixed_network_id_with_invalid_ip_ver(self,
-                                                      mock_neutron_v20_find):
-        osc = mock.MagicMock()
-        cluster_template = mock.MagicMock()
-        definition = self.get_definition()
-        mock_neutron_v20_find.return_value = {
-            'ip_version': 6,
-            'network_id': 'expected_network_id',
-        }
-
-        self.assertRaises(
-            exception.InvalidSubnet,
-            definition.get_fixed_network_id,
-            osc,
-            cluster_template
-        )
-
-    @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
-    def test_get_fixed_network_id_with_duplicated_name(self,
                                                       mock_neutron_v20_find):
-        ex = n_exception.NeutronClientNoUniqueMatch(
-            resource='subnet',
-            name='duplicated-name'
-        )
-
-        self.assert_raises_from_get_fixed_network_id(
-            mock_neutron_v20_find,
-            ex,
-            exception.InvalidSubnet,
-        )
-
-    @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
-    def 
test_get_fixed_network_id_with_client_error(self, - mock_neutron_v20_find): - ex = n_exception.BadRequest() - - self.assert_raises_from_get_fixed_network_id( - mock_neutron_v20_find, - ex, - exception.InvalidSubnet, - ) - - @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id') - def test_get_fixed_network_id_with_server_error(self, - mock_neutron_v20_find): - ex = n_exception.ServiceUnavailable() - - self.assert_raises_from_get_fixed_network_id( - mock_neutron_v20_find, - ex, - n_exception.ServiceUnavailable, - ) - - -class AtomicSwarmTemplateDefinitionTestCase(base.TestCase): - - @mock.patch('magnum.common.clients.OpenStackClients') - @mock.patch('magnum.drivers.swarm_fedora_atomic_v1.template_def' - '.AtomicSwarmTemplateDefinition.get_discovery_url') - @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' - '.get_params') - @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' - '.get_output') - def test_swarm_get_params(self, mock_get_output, mock_get_params, - mock_get_discovery_url, mock_osc_class): - mock_context = mock.MagicMock() - mock_context.auth_token = 'AUTH_TOKEN' - mock_cluster_template = mock.MagicMock() - mock_cluster_template.tls_disabled = False - mock_cluster_template.registry_enabled = False - mock_cluster = mock.MagicMock() - mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' - del mock_cluster.stack_id - mock_osc = mock.MagicMock() - mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1' - mock_osc_class.return_value = mock_osc - - mock_get_discovery_url.return_value = 'fake_discovery_url' - - mock_context.auth_url = 'http://192.168.10.10:5000/v3' - mock_context.user_name = 'fake_user' - mock_context.tenant = 'fake_tenant' - - docker_volume_type = mock_cluster_template.labels.get( - 'docker_volume_type') - flannel_cidr = mock_cluster_template.labels.get('flannel_network_cidr') - flannel_subnet = mock_cluster_template.labels.get( - 'flannel_network_subnetlen') - flannel_backend = mock_cluster_template.labels.get('flannel_backend') - rexray_preempt = mock_cluster_template.labels.get('rexray_preempt') - swarm_strategy = mock_cluster_template.labels.get('swarm_strategy') - - swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition() - - swarm_def.get_params(mock_context, mock_cluster_template, mock_cluster) - - expected_kwargs = {'extra_params': { - 'discovery_url': 'fake_discovery_url', - 'magnum_url': mock_osc.magnum_url.return_value, - 'flannel_network_cidr': flannel_cidr, - 'flannel_backend': flannel_backend, - 'flannel_network_subnetlen': flannel_subnet, - 'auth_url': 'http://192.168.10.10:5000/v3', - 'rexray_preempt': rexray_preempt, - 'swarm_strategy': swarm_strategy, - 'docker_volume_type': docker_volume_type}} - mock_get_params.assert_called_once_with(mock_context, - mock_cluster_template, - mock_cluster, - **expected_kwargs) - - @mock.patch('requests.get') - def test_swarm_validate_discovery_url(self, mock_get): - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"1","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - k8s_def.validate_discovery_url('http://etcd/test', 1) - - @mock.patch('requests.get') - def test_swarm_validate_discovery_url_fail(self, mock_get): - mock_get.side_effect = req_exceptions.RequestException() - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - self.assertRaises(exception.GetClusterSizeFailed, - 
k8s_def.validate_discovery_url, - 'http://etcd/test', 1) - - @mock.patch('requests.get') - def test_swarm_validate_discovery_url_invalid(self, mock_get): - mock_resp = mock.MagicMock() - mock_resp.text = str('{"action":"get"}') - mock_get.return_value = mock_resp - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - self.assertRaises(exception.InvalidClusterDiscoveryURL, - k8s_def.validate_discovery_url, - 'http://etcd/test', 1) - - @mock.patch('requests.get') - def test_swarm_validate_discovery_url_unexpect_size(self, mock_get): - expected_result = str('{"action":"get","node":{"key":"test","value":' - '"1","modifiedIndex":10,"createdIndex":10}}') - mock_resp = mock.MagicMock() - mock_resp.text = expected_result - mock_get.return_value = mock_resp - - k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() - self.assertRaises(exception.InvalidClusterSize, - k8s_def.validate_discovery_url, - 'http://etcd/test', 5) - - @mock.patch('requests.get') - def test_swarm_get_discovery_url(self, mock_get): - CONF.set_override('etcd_discovery_service_endpoint_format', - 'http://etcd/test?size=%(size)d', - group='cluster') - expected_discovery_url = 'http://etcd/token' - mock_resp = mock.MagicMock() - mock_resp.text = expected_discovery_url - mock_get.return_value = mock_resp - mock_cluster = mock.MagicMock() - mock_cluster.discovery_url = None - - swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition() - discovery_url = swarm_def.get_discovery_url(mock_cluster) - - mock_get.assert_called_once_with('http://etcd/test?size=1') - self.assertEqual(mock_cluster.discovery_url, expected_discovery_url) - self.assertEqual(discovery_url, expected_discovery_url) - - @mock.patch('requests.get') - def test_swarm_get_discovery_url_not_found(self, mock_get): - mock_resp = mock.MagicMock() - mock_resp.text = '' - mock_get.return_value = mock_resp - - fake_cluster = mock.MagicMock() - fake_cluster.discovery_url = None - - self.assertRaises( - exception.InvalidDiscoveryURL, - k8sa_tdef.AtomicK8sTemplateDefinition().get_discovery_url, - fake_cluster) - - def test_swarm_get_heat_param(self): - swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition() - - heat_param = swarm_def.get_heat_param(cluster_attr='node_count') - self.assertEqual('number_of_nodes', heat_param) - - def test_update_outputs(self): - swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition() - - expected_api_address = 'updated_address' - expected_node_addresses = ['ex_minion', 'address'] - - outputs = [ - {"output_value": expected_api_address, - "description": "No description given", - "output_key": "api_address"}, - {"output_value": ['any', 'output'], - "description": "No description given", - "output_key": "swarm_master_private"}, - {"output_value": ['any', 'output'], - "description": "No description given", - "output_key": "swarm_master"}, - {"output_value": ['any', 'output'], - "description": "No description given", - "output_key": "swarm_nodes_private"}, - {"output_value": expected_node_addresses, - "description": "No description given", - "output_key": "swarm_nodes"}, - ] - mock_stack = mock.MagicMock() - mock_stack.to_dict.return_value = {'outputs': outputs} - mock_cluster = mock.MagicMock() - mock_cluster_template = mock.MagicMock() - - swarm_def.update_outputs(mock_stack, mock_cluster_template, - mock_cluster) - expected_api_address = "tcp://%s:2376" % expected_api_address - self.assertEqual(expected_api_address, mock_cluster.api_address) - self.assertEqual(expected_node_addresses, mock_cluster.node_addresses) - - -class 
UbuntuMesosTemplateDefinitionTestCase(base.TestCase): - - @mock.patch('magnum.common.clients.OpenStackClients') - @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' - '.get_params') - @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' - '.get_output') - def test_mesos_get_params(self, mock_get_output, mock_get_params, - mock_osc_class): - mock_context = mock.MagicMock() - mock_context.auth_url = 'http://192.168.10.10:5000/v3' - mock_context.user_name = 'mesos_user' - mock_context.tenant = 'admin' - mock_context.domain_name = 'domainname' - mock_cluster_template = mock.MagicMock() - mock_cluster_template.tls_disabled = False - rexray_preempt = mock_cluster_template.labels.get('rexray_preempt') - mesos_slave_isolation = mock_cluster_template.labels.get( - 'mesos_slave_isolation') - mesos_slave_work_dir = mock_cluster_template.labels.get( - 'mesos_slave_work_dir') - mesos_slave_image_providers = mock_cluster_template.labels.get( - 'image_providers') - mesos_slave_executor_env_variables = mock_cluster_template.labels.get( - 'mesos_slave_executor_env_variables') - mock_cluster = mock.MagicMock() - mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' - del mock_cluster.stack_id - mock_osc = mock.MagicMock() - mock_osc.cinder_region_name.return_value = 'RegionOne' - mock_osc_class.return_value = mock_osc - - removal_nodes = ['node1', 'node2'] - mock_scale_manager = mock.MagicMock() - mock_scale_manager.get_removal_nodes.return_value = removal_nodes - - mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition() - - mesos_def.get_params(mock_context, mock_cluster_template, mock_cluster, - scale_manager=mock_scale_manager) - - expected_kwargs = {'extra_params': { - 'region_name': mock_osc.cinder_region_name.return_value, - 'auth_url': 'http://192.168.10.10:5000/v3', - 'username': 'mesos_user', - 'tenant_name': 'admin', - 'domain_name': 'domainname', - 'rexray_preempt': rexray_preempt, - 'mesos_slave_isolation': mesos_slave_isolation, - 'mesos_slave_work_dir': mesos_slave_work_dir, - 'mesos_slave_executor_env_variables': - mesos_slave_executor_env_variables, - 'mesos_slave_image_providers': mesos_slave_image_providers, - 'slaves_to_remove': removal_nodes}} - mock_get_params.assert_called_once_with(mock_context, - mock_cluster_template, - mock_cluster, - **expected_kwargs) - - def test_mesos_get_heat_param(self): - mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition() - - heat_param = mesos_def.get_heat_param(cluster_attr='node_count') - self.assertEqual('number_of_slaves', heat_param) - - heat_param = mesos_def.get_heat_param(cluster_attr='master_count') - self.assertEqual('number_of_masters', heat_param) - - def test_update_outputs(self): - mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition() - - expected_api_address = 'updated_address' - expected_node_addresses = ['ex_slave', 'address'] - expected_master_addresses = ['ex_master', 'address'] - - outputs = [ - {"output_value": expected_api_address, - "description": "No description given", - "output_key": "api_address"}, - {"output_value": ['any', 'output'], - "description": "No description given", - "output_key": "mesos_master_private"}, - {"output_value": expected_master_addresses, - "description": "No description given", - "output_key": "mesos_master"}, - {"output_value": ['any', 'output'], - "description": "No description given", - "output_key": "mesos_slaves_private"}, - {"output_value": expected_node_addresses, - "description": "No description given", - "output_key": "mesos_slaves"}, - ] - mock_stack = 
mock.MagicMock() - mock_stack.to_dict.return_value = {'outputs': outputs} - mock_cluster = mock.MagicMock() - mock_cluster_template = mock.MagicMock() - - mesos_def.update_outputs(mock_stack, mock_cluster_template, - mock_cluster) - - self.assertEqual(expected_api_address, mock_cluster.api_address) - self.assertEqual(expected_node_addresses, mock_cluster.node_addresses) - self.assertEqual(expected_master_addresses, - mock_cluster.master_addresses) diff --git a/magnum/tests/unit/objects/__init__.py b/magnum/tests/unit/objects/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/objects/test_cluster.py b/magnum/tests/unit/objects/test_cluster.py deleted file mode 100644 index ace9d5bf..00000000 --- a/magnum/tests/unit/objects/test_cluster.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_utils import uuidutils -from testtools.matchers import HasLength - -from magnum.common import exception -from magnum import objects -from magnum.tests.unit.db import base -from magnum.tests.unit.db import utils - - -class TestClusterObject(base.DbTestCase): - - def setUp(self): - super(TestClusterObject, self).setUp() - self.fake_cluster = utils.get_test_cluster() - self.fake_cluster['trust_id'] = 'trust_id' - self.fake_cluster['trustee_username'] = 'trustee_user' - self.fake_cluster['trustee_user_id'] = 'trustee_user_id' - self.fake_cluster['trustee_password'] = 'password' - self.fake_cluster['coe_version'] = 'fake-coe-version' - self.fake_cluster['container_version'] = 'fake-container-version' - cluster_template_id = self.fake_cluster['cluster_template_id'] - self.fake_cluster_template = objects.ClusterTemplate( - uuid=cluster_template_id) - self.fake_cluster['keypair'] = 'keypair1' - self.fake_cluster['docker_volume_size'] = 3 - - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_get_by_id(self, mock_cluster_template_get): - cluster_id = self.fake_cluster['id'] - with mock.patch.object(self.dbapi, 'get_cluster_by_id', - autospec=True) as mock_get_cluster: - mock_cluster_template_get.return_value = self.fake_cluster_template - mock_get_cluster.return_value = self.fake_cluster - cluster = objects.Cluster.get(self.context, cluster_id) - mock_get_cluster.assert_called_once_with(self.context, cluster_id) - self.assertEqual(self.context, cluster._context) - self.assertEqual(cluster.cluster_template_id, - cluster.cluster_template.uuid) - - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_get_by_uuid(self, mock_cluster_template_get): - uuid = self.fake_cluster['uuid'] - with mock.patch.object(self.dbapi, 'get_cluster_by_uuid', - autospec=True) as mock_get_cluster: - mock_cluster_template_get.return_value = self.fake_cluster_template - mock_get_cluster.return_value = self.fake_cluster - cluster = objects.Cluster.get(self.context, uuid) - mock_get_cluster.assert_called_once_with(self.context, uuid) - 
self.assertEqual(self.context, cluster._context) - self.assertEqual(cluster.cluster_template_id, - cluster.cluster_template.uuid) - - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_get_by_name(self, mock_cluster_template_get): - name = self.fake_cluster['name'] - with mock.patch.object(self.dbapi, 'get_cluster_by_name', - autospec=True) as mock_get_cluster: - mock_cluster_template_get.return_value = self.fake_cluster_template - mock_get_cluster.return_value = self.fake_cluster - cluster = objects.Cluster.get_by_name(self.context, name) - mock_get_cluster.assert_called_once_with(self.context, name) - self.assertEqual(self.context, cluster._context) - self.assertEqual(cluster.cluster_template_id, - cluster.cluster_template.uuid) - - def test_get_bad_id_and_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.Cluster.get, self.context, 'not-a-uuid') - - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_list(self, mock_cluster_template_get): - with mock.patch.object(self.dbapi, 'get_cluster_list', - autospec=True) as mock_get_list: - mock_get_list.return_value = [self.fake_cluster] - mock_cluster_template_get.return_value = self.fake_cluster_template - clusters = objects.Cluster.list(self.context) - self.assertEqual(1, mock_get_list.call_count) - self.assertThat(clusters, HasLength(1)) - self.assertIsInstance(clusters[0], objects.Cluster) - self.assertEqual(self.context, clusters[0]._context) - self.assertEqual(clusters[0].cluster_template_id, - clusters[0].cluster_template.uuid) - - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_list_all(self, mock_cluster_template_get): - with mock.patch.object(self.dbapi, 'get_cluster_list', - autospec=True) as mock_get_list: - mock_get_list.return_value = [self.fake_cluster] - mock_cluster_template_get.return_value = self.fake_cluster_template - self.context.all_tenants = True - clusters = objects.Cluster.list(self.context) - mock_get_list.assert_called_once_with( - self.context, limit=None, marker=None, filters=None, - sort_dir=None, sort_key=None) - self.assertEqual(1, mock_get_list.call_count) - self.assertThat(clusters, HasLength(1)) - self.assertIsInstance(clusters[0], objects.Cluster) - self.assertEqual(self.context, clusters[0]._context) - - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_list_with_filters(self, mock_cluster_template_get): - with mock.patch.object(self.dbapi, 'get_cluster_list', - autospec=True) as mock_get_list: - mock_get_list.return_value = [self.fake_cluster] - mock_cluster_template_get.return_value = self.fake_cluster_template - filters = {'name': 'cluster1'} - clusters = objects.Cluster.list(self.context, filters=filters) - - mock_get_list.assert_called_once_with(self.context, sort_key=None, - sort_dir=None, - filters=filters, limit=None, - marker=None) - self.assertEqual(1, mock_get_list.call_count) - self.assertThat(clusters, HasLength(1)) - self.assertIsInstance(clusters[0], objects.Cluster) - self.assertEqual(self.context, clusters[0]._context) - - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_create(self, mock_cluster_template_get): - with mock.patch.object(self.dbapi, 'create_cluster', - autospec=True) as mock_create_cluster: - mock_cluster_template_get.return_value = self.fake_cluster_template - mock_create_cluster.return_value = self.fake_cluster - cluster = objects.Cluster(self.context, **self.fake_cluster) - cluster.create() - mock_create_cluster.assert_called_once_with(self.fake_cluster) - 
self.assertEqual(self.context, cluster._context) - - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_destroy(self, mock_cluster_template_get): - uuid = self.fake_cluster['uuid'] - with mock.patch.object(self.dbapi, 'get_cluster_by_uuid', - autospec=True) as mock_get_cluster: - mock_get_cluster.return_value = self.fake_cluster - mock_cluster_template_get.return_value = self.fake_cluster_template - with mock.patch.object(self.dbapi, 'destroy_cluster', - autospec=True) as mock_destroy_cluster: - cluster = objects.Cluster.get_by_uuid(self.context, uuid) - cluster.destroy() - mock_get_cluster.assert_called_once_with(self.context, uuid) - mock_destroy_cluster.assert_called_once_with(uuid) - self.assertEqual(self.context, cluster._context) - - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_save(self, mock_cluster_template_get): - uuid = self.fake_cluster['uuid'] - with mock.patch.object(self.dbapi, 'get_cluster_by_uuid', - autospec=True) as mock_get_cluster: - mock_cluster_template_get.return_value = self.fake_cluster_template - mock_get_cluster.return_value = self.fake_cluster - with mock.patch.object(self.dbapi, 'update_cluster', - autospec=True) as mock_update_cluster: - cluster = objects.Cluster.get_by_uuid(self.context, uuid) - cluster.node_count = 10 - cluster.master_count = 5 - cluster.save() - - mock_get_cluster.assert_called_once_with(self.context, uuid) - mock_update_cluster.assert_called_once_with( - uuid, {'node_count': 10, 'master_count': 5, - 'cluster_template': self.fake_cluster_template}) - self.assertEqual(self.context, cluster._context) - - @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid') - def test_refresh(self, mock_cluster_template_get): - uuid = self.fake_cluster['uuid'] - new_uuid = uuidutils.generate_uuid() - returns = [dict(self.fake_cluster, uuid=uuid), - dict(self.fake_cluster, uuid=new_uuid)] - expected = [mock.call(self.context, uuid), - mock.call(self.context, uuid)] - with mock.patch.object(self.dbapi, 'get_cluster_by_uuid', - side_effect=returns, - autospec=True) as mock_get_cluster: - mock_cluster_template_get.return_value = self.fake_cluster_template - cluster = objects.Cluster.get_by_uuid(self.context, uuid) - self.assertEqual(uuid, cluster.uuid) - cluster.refresh() - self.assertEqual(new_uuid, cluster.uuid) - self.assertEqual(expected, mock_get_cluster.call_args_list) - self.assertEqual(self.context, cluster._context) diff --git a/magnum/tests/unit/objects/test_cluster_template.py b/magnum/tests/unit/objects/test_cluster_template.py deleted file mode 100644 index 7760ad61..00000000 --- a/magnum/tests/unit/objects/test_cluster_template.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
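[editor's note] test_save and test_refresh at the end of test_cluster.py above
pin down the usual oslo.versionedobjects contract: save() pushes only changed
fields to the DB, and refresh() re-reads the row and overwrites stale
attributes. A stripped-down sketch of the refresh() side, assuming a
hypothetical dbapi whose get_cluster_by_uuid() returns a dict (the real object
also tracks a set of dirty fields):

    class MiniCluster(object):
        fields = ('uuid', 'name', 'node_count', 'master_count')

        def __init__(self, dbapi, **kw):
            self._dbapi = dbapi
            for field in self.fields:
                setattr(self, field, kw.get(field))

        def refresh(self):
            # Re-read the persisted row and copy back any field that
            # differs, which is the behaviour test_refresh asserts.
            current = self._dbapi.get_cluster_by_uuid(self.uuid)
            for field in self.fields:
                if getattr(self, field) != current[field]:
                    setattr(self, field, current[field])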
- -import mock -from oslo_utils import uuidutils -from testtools.matchers import HasLength - -from magnum.common import exception -from magnum import objects -from magnum.tests.unit.db import base -from magnum.tests.unit.db import utils - - -class TestClusterTemplateObject(base.DbTestCase): - - def setUp(self): - super(TestClusterTemplateObject, self).setUp() - self.fake_cluster_template = utils.get_test_cluster_template() - - def test_get_by_id(self): - cluster_template_id = self.fake_cluster_template['id'] - with mock.patch.object(self.dbapi, 'get_cluster_template_by_id', - autospec=True) as mock_get_cluster_template: - mock_get_cluster_template.return_value = self.fake_cluster_template - cluster_template = objects.ClusterTemplate.get(self.context, - cluster_template_id) - mock_get_cluster_template.assert_called_once_with( - self.context, cluster_template_id) - self.assertEqual(self.context, cluster_template._context) - - def test_get_by_uuid(self): - uuid = self.fake_cluster_template['uuid'] - with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid', - autospec=True) as mock_get_cluster_template: - mock_get_cluster_template.return_value = self.fake_cluster_template - cluster_template = objects.ClusterTemplate.get(self.context, uuid) - mock_get_cluster_template.assert_called_once_with(self.context, - uuid) - self.assertEqual(self.context, cluster_template._context) - - def test_get_bad_id_and_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.ClusterTemplate.get, self.context, - 'not-a-uuid') - - def test_get_by_name(self): - name = self.fake_cluster_template['name'] - with mock.patch.object(self.dbapi, 'get_cluster_template_by_name', - autospec=True) as mock_get_cluster_template: - mock_get_cluster_template.return_value = self.fake_cluster_template - cluster_template = objects.ClusterTemplate.get_by_name( - self.context, name) - mock_get_cluster_template.assert_called_once_with(self.context, - name) - self.assertEqual(self.context, cluster_template._context) - - def test_list(self): - with mock.patch.object(self.dbapi, 'get_cluster_template_list', - autospec=True) as mock_get_list: - mock_get_list.return_value = [self.fake_cluster_template] - cluster_templates = objects.ClusterTemplate.list(self.context) - self.assertEqual(1, mock_get_list.call_count) - self.assertThat(cluster_templates, HasLength(1)) - self.assertIsInstance(cluster_templates[0], - objects.ClusterTemplate) - self.assertEqual(self.context, cluster_templates[0]._context) - - def test_create(self): - with mock.patch.object(self.dbapi, 'create_cluster_template', - autospec=True) as mock_create_cluster_template: - mock_create_cluster_template.return_value = \ - self.fake_cluster_template - cluster_template = objects.ClusterTemplate( - self.context, **self.fake_cluster_template) - cluster_template.create() - mock_create_cluster_template.assert_called_once_with( - self.fake_cluster_template) - self.assertEqual(self.context, cluster_template._context) - - def test_destroy(self): - uuid = self.fake_cluster_template['uuid'] - with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid', - autospec=True) as mock_get_cluster_template: - mock_get_cluster_template.return_value = self.fake_cluster_template - with mock.patch.object( - self.dbapi, 'destroy_cluster_template', autospec=True)\ - as mock_destroy_cluster_template: - cluster_template = objects.ClusterTemplate.get_by_uuid( - self.context, uuid) - cluster_template.destroy() - mock_get_cluster_template.assert_called_once_with(self.context, - uuid) 
- mock_destroy_cluster_template.assert_called_once_with(uuid) - self.assertEqual(self.context, cluster_template._context) - - def test_save(self): - uuid = self.fake_cluster_template['uuid'] - with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid', - autospec=True) as mock_get_cluster_template: - mock_get_cluster_template.return_value = self.fake_cluster_template - with mock.patch.object(self.dbapi, 'update_cluster_template', - autospec=True) \ - as mock_update_cluster_template: - cluster_template = objects.ClusterTemplate.get_by_uuid( - self.context, uuid) - cluster_template.image_id = 'test-image' - cluster_template.save() - - mock_get_cluster_template.assert_called_once_with(self.context, - uuid) - mock_update_cluster_template.assert_called_once_with( - uuid, {'image_id': 'test-image'}) - self.assertEqual(self.context, cluster_template._context) - - def test_refresh(self): - uuid = self.fake_cluster_template['uuid'] - new_uuid = uuidutils.generate_uuid() - returns = [dict(self.fake_cluster_template, uuid=uuid), - dict(self.fake_cluster_template, uuid=new_uuid)] - expected = [mock.call(self.context, uuid), - mock.call(self.context, uuid)] - with mock.patch.object(self.dbapi, 'get_cluster_template_by_uuid', - side_effect=returns, - autospec=True) as mock_get_cluster_template: - cluster_template = objects.ClusterTemplate.get_by_uuid( - self.context, uuid) - self.assertEqual(uuid, cluster_template.uuid) - cluster_template.refresh() - self.assertEqual(new_uuid, cluster_template.uuid) - self.assertEqual(expected, - mock_get_cluster_template.call_args_list) - self.assertEqual(self.context, cluster_template._context) diff --git a/magnum/tests/unit/objects/test_fields.py b/magnum/tests/unit/objects/test_fields.py deleted file mode 100644 index 89b7f1ec..00000000 --- a/magnum/tests/unit/objects/test_fields.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2015 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
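The enum-status field tests that follow all exercise one oslo.versionedobjects pattern: an Enum type wrapped in a BaseEnumField, whose coerce() accepts only whitelisted values and whose stringify() quotes them. A minimal standalone sketch of that pattern, assuming only oslo.versionedobjects is installed; DemoStatus and DemoStatusField are hypothetical stand-ins for the magnum.objects.fields definitions under test:

from oslo_versionedobjects import fields


class DemoStatus(fields.Enum):
    ALL = ('CREATE_IN_PROGRESS', 'CREATE_COMPLETE', 'CREATE_FAILED')

    def __init__(self):
        super(DemoStatus, self).__init__(valid_values=DemoStatus.ALL)


class DemoStatusField(fields.BaseEnumField):
    AUTO_TYPE = DemoStatus()


field = DemoStatusField()
# Whitelisted values coerce to themselves, which is what the
# (value, expected) pairs in coerce_good_values encode.
assert field.coerce(None, 'status', 'CREATE_COMPLETE') == 'CREATE_COMPLETE'
# stringify() wraps valid values in quotes; the test_stringify cases
# below assert exactly this "'...'" form.
assert field.stringify('CREATE_COMPLETE') == "'CREATE_COMPLETE'"
# Anything outside valid_values raises ValueError, mirroring
# coerce_bad_values and test_stringify_invalid.
try:
    field.coerce(None, 'status', 'DELETE_STOPPED')
except ValueError:
    pass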
- -from oslo_versionedobjects.tests import test_fields - -from magnum.objects import fields - - -class TestClusterStatus(test_fields.TestField): - def setUp(self): - super(TestClusterStatus, self).setUp() - self.field = fields.ClusterStatusField() - self.coerce_good_values = [('CREATE_IN_PROGRESS', - 'CREATE_IN_PROGRESS'), - ('CREATE_FAILED', 'CREATE_FAILED'), - ('CREATE_COMPLETE', 'CREATE_COMPLETE'), - ('UPDATE_IN_PROGRESS', - 'UPDATE_IN_PROGRESS'), - ('UPDATE_FAILED', 'UPDATE_FAILED'), - ('UPDATE_COMPLETE', 'UPDATE_COMPLETE'), - ('DELETE_IN_PROGRESS', - 'DELETE_IN_PROGRESS'), - ('DELETE_FAILED', 'DELETE_FAILED'), - ('RESUME_COMPLETE', 'RESUME_COMPLETE'), - ('RESTORE_COMPLETE', 'RESTORE_COMPLETE'), - ('ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'), - ('SNAPSHOT_COMPLETE', 'SNAPSHOT_COMPLETE'), - ('CHECK_COMPLETE', 'CHECK_COMPLETE'), - ('ADOPT_COMPLETE', 'ADOPT_COMPLETE')] - self.coerce_bad_values = ['DELETE_STOPPED'] - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'UPDATE_FAILED'", - self.field.stringify('UPDATE_FAILED')) - - def test_stringify_invalid(self): - self.assertRaises(ValueError, self.field.stringify, 'DELETE_STOPPED') - - -class TestContainerStatus(test_fields.TestField): - def setUp(self): - super(TestContainerStatus, self).setUp() - self.field = fields.ContainerStatusField() - self.coerce_good_values = [('Error', 'Error'), ('Running', 'Running'), - ('Stopped', 'Stopped'), - ('Paused', 'Paused'), - ('Unknown', 'Unknown'), ] - self.coerce_bad_values = ['DELETED'] - - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'Stopped'", - self.field.stringify('Stopped')) - - def test_stringify_invalid(self): - self.assertRaises(ValueError, self.field.stringify, 'DELETED') - - -class TestClusterType(test_fields.TestField): - def setUp(self): - super(TestClusterType, self).setUp() - self.field = fields.ClusterTypeField() - self.coerce_good_values = [('kubernetes', 'kubernetes'), - ('swarm', 'swarm'), - ('mesos', 'mesos'), ] - self.coerce_bad_values = ['invalid'] - - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'kubernetes'", - self.field.stringify('kubernetes')) - - def test_stringify_invalid(self): - self.assertRaises(ValueError, self.field.stringify, 'invalid') - - -class TestMagnumServiceBinary(test_fields.TestField): - def setUp(self): - super(TestMagnumServiceBinary, self).setUp() - self.field = fields.MagnumServiceBinaryField() - self.coerce_good_values = [('magnum-conductor', 'magnum-conductor')] - self.coerce_bad_values = ['invalid'] - - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'magnum-conductor'", - self.field.stringify('magnum-conductor')) - - def test_stringify_invalid(self): - self.assertRaises(ValueError, self.field.stringify, 'invalid') - - -class TestServerType(test_fields.TestField): - def setUp(self): - super(TestServerType, self).setUp() - self.field = fields.ServerTypeField() - self.coerce_good_values = [('vm', 'vm'), - ('bm', 'bm'), ] - self.coerce_bad_values = ['invalid'] - - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def 
test_stringify(self): - self.assertEqual("'vm'", - self.field.stringify('vm')) - - def test_stringify_invalid(self): - self.assertRaises(ValueError, self.field.stringify, 'invalid') diff --git a/magnum/tests/unit/objects/test_magnum_service.py b/magnum/tests/unit/objects/test_magnum_service.py deleted file mode 100644 index 2b94032e..00000000 --- a/magnum/tests/unit/objects/test_magnum_service.py +++ /dev/null @@ -1,109 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from magnum import objects -from magnum.tests.unit.db import base -from magnum.tests.unit.db import utils - - -class TestMagnumServiceObject(base.DbTestCase): - - def setUp(self): - super(TestMagnumServiceObject, self).setUp() - self.fake_magnum_service = utils.get_test_magnum_service() - - def test_get_by_host_and_binary(self): - with mock.patch.object(self.dbapi, - 'get_magnum_service_by_host_and_binary', - autospec=True) as mock_get_magnum_service: - mock_get_magnum_service.return_value = self.fake_magnum_service - ms = objects.MagnumService.get_by_host_and_binary(self.context, - 'fake-host', - 'fake-bin') - mock_get_magnum_service.assert_called_once_with('fake-host', - 'fake-bin') - self.assertEqual(self.context, ms._context) - - def test_get_by_host_and_binary_no_service(self): - with mock.patch.object(self.dbapi, 'create_magnum_service', - autospec=True) as mock_get_magnum_service: - mock_get_magnum_service.return_value = None - ms = objects.MagnumService.get_by_host_and_binary(self.context, - 'fake-host', - 'fake-bin') - - self.assertIsNone(ms) - - def test_create(self): - with mock.patch.object(self.dbapi, 'create_magnum_service', - autospec=True) as mock_create_magnum_service: - mock_create_magnum_service.return_value = self.fake_magnum_service - ms_dict = {'host': 'fakehost', 'binary': 'fake-bin'} - ms = objects.MagnumService(self.context, **ms_dict) - ms.create(self.context) - mock_create_magnum_service.assert_called_once_with(ms_dict) - - def test_destroy(self): - with mock.patch.object(self.dbapi, - 'get_magnum_service_by_host_and_binary', - autospec=True) as mock_get_magnum_service: - mock_get_magnum_service.return_value = self.fake_magnum_service - with mock.patch.object(self.dbapi, - 'destroy_magnum_service', - autospec=True) as mock_destroy_ms: - ms = objects.MagnumService.get_by_host_and_binary( - self.context, 'fake-host', 'fake-bin') - ms.destroy() - mock_get_magnum_service.assert_called_once_with( - 'fake-host', 'fake-bin') - mock_destroy_ms.assert_called_once_with( - self.fake_magnum_service['id']) - self.assertEqual(self.context, ms._context) - - def test_save(self): - with mock.patch.object(self.dbapi, - 'get_magnum_service_by_host_and_binary', - autospec=True) as mock_get_magnum_service: - mock_get_magnum_service.return_value = self.fake_magnum_service - with mock.patch.object(self.dbapi, - 'update_magnum_service', - autospec=True) as mock_update_ms: - ms = objects.MagnumService.get_by_host_and_binary( - self.context, 'fake-host', 'fake-bin') - ms.disabled = True - ms.save() - 
mock_get_magnum_service.assert_called_once_with( - 'fake-host', 'fake-bin') - mock_update_ms.assert_called_once_with( - self.fake_magnum_service['id'], {'disabled': True}) - self.assertEqual(self.context, ms._context) - - def test_report_state_up(self): - with mock.patch.object(self.dbapi, - 'get_magnum_service_by_host_and_binary', - autospec=True) as mock_get_magnum_service: - mock_get_magnum_service.return_value = self.fake_magnum_service - with mock.patch.object(self.dbapi, - 'update_magnum_service', - autospec=True) as mock_update_ms: - ms = objects.MagnumService.get_by_host_and_binary( - self.context, 'fake-host', 'fake-bin') - last_report_count = self.fake_magnum_service['report_count'] - ms.report_state_up() - mock_get_magnum_service.assert_called_once_with( - 'fake-host', 'fake-bin') - self.assertEqual(self.context, ms._context) - mock_update_ms.assert_called_once_with( - self.fake_magnum_service['id'], - {'report_count': last_report_count + 1}) diff --git a/magnum/tests/unit/objects/test_objects.py b/magnum/tests/unit/objects/test_objects.py deleted file mode 100644 index 32af8fef..00000000 --- a/magnum/tests/unit/objects/test_objects.py +++ /dev/null @@ -1,460 +0,0 @@ -# Copyright 2015 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import gettext - -import mock -from oslo_versionedobjects import exception as object_exception -from oslo_versionedobjects import fields -from oslo_versionedobjects import fixture - -from magnum.common import context as magnum_context -from magnum.objects import base -from magnum.tests import base as test_base - -gettext.install('magnum') - - -@base.MagnumObjectRegistry.register -class MyObj(base.MagnumPersistentObject, base.MagnumObject): - VERSION = '1.0' - - fields = {'foo': fields.IntegerField(), - 'bar': fields.StringField(), - 'missing': fields.StringField(), - } - - def obj_load_attr(self, attrname): - setattr(self, attrname, 'loaded!') - - @base.remotable_classmethod - def query(cls, context): - obj = cls(context) - obj.foo = 1 - obj.bar = 'bar' - obj.obj_reset_changes() - return obj - - @base.remotable - def marco(self, context): - return 'polo' - - @base.remotable - def update_test(self, context): - if context.project_id == 'alternate': - self.bar = 'alternate-context' - else: - self.bar = 'updated' - - @base.remotable - def save(self, context): - self.obj_reset_changes() - - @base.remotable - def refresh(self, context): - self.foo = 321 - self.bar = 'refreshed' - self.obj_reset_changes() - - @base.remotable - def modify_save_modify(self, context): - self.bar = 'meow' - self.save(context) - self.foo = 42 - - -class MyObj2(object): - @classmethod - def obj_name(cls): - return 'MyObj' - - @base.remotable_classmethod - def get(cls, *args, **kwargs): - pass - - -@base.MagnumObjectRegistry.register_if(False) -class TestSubclassedObject(MyObj): - fields = {'new_field': fields.StringField()} - - -class _TestObject(object): - def test_hydration_type_error(self): - primitive = {'magnum_object.name': 'MyObj', - 'magnum_object.namespace': 'magnum', - 'magnum_object.version': '1.0', - 'magnum_object.data': {'foo': 'a'}} - self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive) - - def test_hydration(self): - primitive = {'magnum_object.name': 'MyObj', - 'magnum_object.namespace': 'magnum', - 'magnum_object.version': '1.0', - 'magnum_object.data': {'foo': 1}} - obj = MyObj.obj_from_primitive(primitive) - self.assertEqual(1, obj.foo) - - def test_hydration_bad_ns(self): - primitive = {'magnum_object.name': 'MyObj', - 'magnum_object.namespace': 'foo', - 'magnum_object.version': '1.0', - 'magnum_object.data': {'foo': 1}} - self.assertRaises(object_exception.UnsupportedObjectError, - MyObj.obj_from_primitive, primitive) - - def test_dehydration(self): - expected = {'magnum_object.name': 'MyObj', - 'magnum_object.namespace': 'magnum', - 'magnum_object.version': '1.0', - 'magnum_object.data': {'foo': 1}} - obj = MyObj(self.context) - obj.foo = 1 - obj.obj_reset_changes() - self.assertEqual(expected, obj.obj_to_primitive()) - - def test_get_updates(self): - obj = MyObj(self.context) - self.assertEqual({}, obj.obj_get_changes()) - obj.foo = 123 - self.assertEqual({'foo': 123}, obj.obj_get_changes()) - obj.bar = 'test' - self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) - obj.obj_reset_changes() - self.assertEqual({}, obj.obj_get_changes()) - - def test_object_property(self): - obj = MyObj(self.context, foo=1) - self.assertEqual(1, obj.foo) - - def test_object_property_type_error(self): - obj = MyObj(self.context) - - def fail(): - obj.foo = 'a' - self.assertRaises(ValueError, fail) - - def test_load(self): - obj = MyObj(self.context) - self.assertEqual('loaded!', obj.bar) - - def test_load_in_base(self): - @base.MagnumObjectRegistry.register_if(False) 
- class Foo(base.MagnumPersistentObject, base.MagnumObject): - fields = {'foobar': fields.IntegerField()} - obj = Foo(self.context) - # NOTE(danms): Can't use assertRaisesRegexp() because of py26 - raised = False - ex = None - try: - obj.foobar - except NotImplementedError as e: - raised = True - ex = e - self.assertTrue(raised) - self.assertIn('foobar', str(ex)) - - def test_loaded_in_primitive(self): - obj = MyObj(self.context) - obj.foo = 1 - obj.obj_reset_changes() - self.assertEqual('loaded!', obj.bar) - expected = {'magnum_object.name': 'MyObj', - 'magnum_object.namespace': 'magnum', - 'magnum_object.version': '1.0', - 'magnum_object.changes': ['bar'], - 'magnum_object.data': {'foo': 1, - 'bar': 'loaded!'}} - self.assertEqual(expected, obj.obj_to_primitive()) - - def test_changes_in_primitive(self): - obj = MyObj(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - primitive = obj.obj_to_primitive() - self.assertIn('magnum_object.changes', primitive) - obj2 = MyObj.obj_from_primitive(primitive) - self.assertEqual(set(['foo']), obj2.obj_what_changed()) - obj2.obj_reset_changes() - self.assertEqual(set(), obj2.obj_what_changed()) - - def test_unknown_objtype(self): - self.assertRaises(object_exception.UnsupportedObjectError, - base.MagnumObject.obj_class_from_name, 'foo', '1.0') - - def test_with_alternate_context(self): - context1 = magnum_context.RequestContext('foo', 'foo') - context2 = magnum_context.RequestContext('bar', project_id='alternate') - obj = MyObj.query(context1) - obj.update_test(context2) - self.assertEqual('alternate-context', obj.bar) - - def test_orphaned_object(self): - obj = MyObj.query(self.context) - obj._context = None - self.assertRaises(object_exception.OrphanedObjectError, - obj.update_test) - - def test_changed_1(self): - obj = MyObj.query(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - obj.update_test(self.context) - self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) - self.assertEqual(123, obj.foo) - - def test_changed_2(self): - obj = MyObj.query(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - obj.save(self.context) - self.assertEqual(set([]), obj.obj_what_changed()) - self.assertEqual(123, obj.foo) - - def test_changed_3(self): - obj = MyObj.query(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - obj.refresh(self.context) - self.assertEqual(set([]), obj.obj_what_changed()) - self.assertEqual(321, obj.foo) - self.assertEqual('refreshed', obj.bar) - - def test_changed_4(self): - obj = MyObj.query(self.context) - obj.bar = 'something' - self.assertEqual(set(['bar']), obj.obj_what_changed()) - obj.modify_save_modify(self.context) - self.assertEqual(set(['foo']), obj.obj_what_changed()) - self.assertEqual(42, obj.foo) - self.assertEqual('meow', obj.bar) - - def test_static_result(self): - obj = MyObj.query(self.context) - self.assertEqual('bar', obj.bar) - result = obj.marco(self.context) - self.assertEqual('polo', result) - - def test_updates(self): - obj = MyObj.query(self.context) - self.assertEqual(1, obj.foo) - obj.update_test(self.context) - self.assertEqual('updated', obj.bar) - - def test_base_attributes(self): - dt = datetime.datetime(1955, 11, 5) - datatime = fields.DateTimeField() - obj = MyObj(self.context) - obj.created_at = dt - obj.updated_at = dt - expected = {'magnum_object.name': 'MyObj', - 'magnum_object.namespace': 'magnum', - 'magnum_object.version': '1.0', - 
'magnum_object.changes': - ['created_at', 'updated_at'], - 'magnum_object.data': - {'created_at': datatime.stringify(dt), - 'updated_at': datatime.stringify(dt)} - } - actual = obj.obj_to_primitive() - # magnum_object.changes is built from a set and order is undefined - self.assertEqual(sorted(expected['magnum_object.changes']), - sorted(actual['magnum_object.changes'])) - del expected['magnum_object.changes'], actual['magnum_object.changes'] - self.assertEqual(expected, actual) - - def test_contains(self): - obj = MyObj(self.context) - self.assertNotIn('foo', obj) - obj.foo = 1 - self.assertIn('foo', obj) - self.assertNotIn('does_not_exist', obj) - - def test_obj_attr_is_set(self): - obj = MyObj(self.context, foo=1) - self.assertTrue(obj.obj_attr_is_set('foo')) - self.assertFalse(obj.obj_attr_is_set('bar')) - self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang') - - def test_get(self): - obj = MyObj(self.context, foo=1) - # Foo has value, should not get the default - self.assertEqual(1, getattr(obj, 'foo', 2)) - # Foo has value, should return the value without error - self.assertEqual(1, getattr(obj, 'foo')) - # Bar without a default should lazy-load - self.assertEqual('loaded!', getattr(obj, 'bar')) - # Bar now has a default, but loaded value should be returned - self.assertEqual('loaded!', getattr(obj, 'bar', 'not-loaded')) - # Invalid attribute should raise AttributeError - self.assertFalse(hasattr(obj, 'nothing')) - - def test_object_inheritance(self): - base_fields = list(base.MagnumPersistentObject.fields.keys()) - myobj_fields = ['foo', 'bar', 'missing'] + base_fields - myobj3_fields = ['new_field'] - self.assertTrue(issubclass(TestSubclassedObject, MyObj)) - self.assertEqual(len(MyObj.fields), len(myobj_fields)) - self.assertEqual(set(MyObj.fields.keys()), set(myobj_fields)) - self.assertEqual(len(TestSubclassedObject.fields), - len(myobj_fields) + len(myobj3_fields)) - self.assertEqual(set(TestSubclassedObject.fields.keys()), - set(myobj_fields) | set(myobj3_fields)) - - def test_get_changes(self): - obj = MyObj(self.context) - self.assertEqual({}, obj.obj_get_changes()) - obj.foo = 123 - self.assertEqual({'foo': 123}, obj.obj_get_changes()) - obj.bar = 'test' - self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) - obj.obj_reset_changes() - self.assertEqual({}, obj.obj_get_changes()) - - def test_obj_fields(self): - @base.MagnumObjectRegistry.register_if(False) - class TestObj(base.MagnumPersistentObject, base.MagnumObject): - fields = {'foo': fields.IntegerField()} - obj_extra_fields = ['bar'] - - @property - def bar(self): - return 'this is bar' - - obj = TestObj(self.context) - self.assertEqual(set(['created_at', 'updated_at', 'foo', 'bar']), - set(obj.obj_fields)) - - def test_obj_constructor(self): - obj = MyObj(self.context, foo=123, bar='abc') - self.assertEqual(123, obj.foo) - self.assertEqual('abc', obj.bar) - self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) - - -class TestObject(test_base.TestCase, _TestObject): - pass - - -# This is a static dictionary that holds all fingerprints of the versioned -# objects registered with the MagnumRegistry. Each fingerprint contains -# the version of the object and an md5 hash of RPC-critical parts of the -# object (fields and remotable methods). If either the version or hash -# change, the static tree needs to be updated. 
-# For more information on object version testing, read -# http://docs.openstack.org/developer/magnum/objects.html -object_data = { - 'Cluster': '1.14-281c582b16291c4f0666371e53975a5c', - 'ClusterTemplate': '1.17-f1ce5212b46506360b41ab5cb7658af4', - 'Certificate': '1.1-1924dc077daa844f0f9076332ef96815', - 'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd', - 'X509KeyPair': '1.2-d81950af36c59a71365e33ce539d24f9', - 'MagnumService': '1.0-2d397ec59b0046bd5ec35cd3e06efeca', - 'Stats': '1.0-73a1cd6e3c0294c932a66547faba216c', - 'Quota': '1.0-94e100aebfa88f7d8428e007f2049c18', -} - - -class TestObjectVersions(test_base.TestCase): - def test_versions(self): - # Test the versions of current objects with the static tree above. - # This ensures that any incompatible object changes require a version - # bump. - classes = base.MagnumObjectRegistry.obj_classes() - checker = fixture.ObjectVersionChecker(obj_classes=classes) - - expected, actual = checker.test_hashes(object_data) - self.assertEqual(expected, actual, - "Fields or remotable methods in some objects have " - "changed. Make sure the versions of the objects has " - "been bumped, and update the hashes in the static " - "fingerprints tree (object_data). For more " - "information, read http://docs.openstack.org/" - "developer/magnum/objects.html.") - - -class TestObjectSerializer(test_base.TestCase): - - def test_object_serialization(self): - ser = base.MagnumObjectSerializer() - obj = MyObj(self.context) - primitive = ser.serialize_entity(self.context, obj) - self.assertIn('magnum_object.name', primitive) - obj2 = ser.deserialize_entity(self.context, primitive) - self.assertIsInstance(obj2, MyObj) - self.assertEqual(self.context, obj2._context) - - def test_object_serialization_iterables(self): - ser = base.MagnumObjectSerializer() - obj = MyObj(self.context) - for iterable in (list, tuple, set): - thing = iterable([obj]) - primitive = ser.serialize_entity(self.context, thing) - self.assertEqual(1, len(primitive)) - for item in primitive: - self.assertFalse(isinstance(item, base.MagnumObject)) - thing2 = ser.deserialize_entity(self.context, primitive) - self.assertEqual(1, len(thing2)) - for item in thing2: - self.assertIsInstance(item, MyObj) - - @mock.patch('magnum.objects.base.MagnumObject.indirection_api') - def _test_deserialize_entity_newer(self, obj_version, backported_to, - mock_indirection_api, - my_version='1.6'): - ser = base.MagnumObjectSerializer() - mock_indirection_api.object_backport_versions.side_effect \ - = NotImplementedError() - mock_indirection_api.object_backport.return_value = 'backported' - - @base.MagnumObjectRegistry.register - class MyTestObj(MyObj): - VERSION = my_version - - obj = MyTestObj() - obj.VERSION = obj_version - primitive = obj.obj_to_primitive() - result = ser.deserialize_entity(self.context, primitive) - if backported_to is None: - self.assertEqual( - False, - mock_indirection_api.object_backport.called) - else: - self.assertEqual('backported', result) - mock_indirection_api.object_backport.assert_called_with( - self.context, primitive, backported_to) - - def test_deserialize_entity_newer_version_backports_level1(self): - "Test object with unsupported (newer) version" - self._test_deserialize_entity_newer('11.5', '1.6') - - def test_deserialize_entity_newer_version_backports_level2(self): - "Test object with unsupported (newer) version" - self._test_deserialize_entity_newer('1.25', '1.6') - - def test_deserialize_entity_same_revision_does_not_backport(self): - "Test object with supported revision" - 
self._test_deserialize_entity_newer('1.6', None) - - def test_deserialize_entity_newer_revision_does_not_backport_zero(self): - "Test object with supported revision" - self._test_deserialize_entity_newer('1.6.0', None) - - def test_deserialize_entity_newer_revision_does_not_backport(self): - "Test object with supported (newer) revision" - self._test_deserialize_entity_newer('1.6.1', None) - - def test_deserialize_entity_newer_version_passes_revision(self): - "Test object with unsupported (newer) version and revision" - self._test_deserialize_entity_newer('1.7', '1.6.1', my_version='1.6.1') diff --git a/magnum/tests/unit/objects/test_x509keypair.py b/magnum/tests/unit/objects/test_x509keypair.py deleted file mode 100644 index 9d5909ce..00000000 --- a/magnum/tests/unit/objects/test_x509keypair.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2015 NEC Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_utils import uuidutils -from testtools.matchers import HasLength - -from magnum.common import exception -from magnum import objects -from magnum.tests.unit.db import base -from magnum.tests.unit.db import utils - - -class TestX509KeyPairObject(base.DbTestCase): - - def setUp(self): - super(TestX509KeyPairObject, self).setUp() - self.fake_x509keypair = utils.get_test_x509keypair() - - def test_get_by_id(self): - x509keypair_id = self.fake_x509keypair['id'] - with mock.patch.object(self.dbapi, 'get_x509keypair_by_id', - autospec=True) as mock_get_x509keypair: - mock_get_x509keypair.return_value = self.fake_x509keypair - x509keypair = objects.X509KeyPair.get(self.context, x509keypair_id) - mock_get_x509keypair.assert_called_once_with(self.context, - x509keypair_id) - self.assertEqual(self.context, x509keypair._context) - - def test_get_by_uuid(self): - uuid = self.fake_x509keypair['uuid'] - with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', - autospec=True) as mock_get_x509keypair: - mock_get_x509keypair.return_value = self.fake_x509keypair - x509keypair = objects.X509KeyPair.get(self.context, uuid) - mock_get_x509keypair.assert_called_once_with(self.context, uuid) - self.assertEqual(self.context, x509keypair._context) - - def test_get_bad_id_and_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.X509KeyPair.get, self.context, 'not-a-uuid') - - def test_list(self): - with mock.patch.object(self.dbapi, 'get_x509keypair_list', - autospec=True) as mock_get_list: - mock_get_list.return_value = [self.fake_x509keypair] - x509keypairs = objects.X509KeyPair.list(self.context) - self.assertEqual(1, mock_get_list.call_count) - self.assertThat(x509keypairs, HasLength(1)) - self.assertIsInstance(x509keypairs[0], objects.X509KeyPair) - self.assertEqual(self.context, x509keypairs[0]._context) - - def test_list_all(self): - with mock.patch.object(self.dbapi, 'get_x509keypair_list', - autospec=True) as mock_get_list: - mock_get_list.return_value = [self.fake_x509keypair] - self.context.all_tenants = True - x509keypairs = 
objects.X509KeyPair.list(self.context) - mock_get_list.assert_called_once_with( - self.context, limit=None, marker=None, filters=None, - sort_dir=None, sort_key=None) - self.assertEqual(1, mock_get_list.call_count) - self.assertThat(x509keypairs, HasLength(1)) - self.assertIsInstance(x509keypairs[0], objects.X509KeyPair) - self.assertEqual(self.context, x509keypairs[0]._context) - - def test_create(self): - with mock.patch.object(self.dbapi, 'create_x509keypair', - autospec=True) as mock_create_x509keypair: - mock_create_x509keypair.return_value = self.fake_x509keypair - x509keypair = objects.X509KeyPair(self.context, - **self.fake_x509keypair) - x509keypair.create() - mock_create_x509keypair.assert_called_once_with( - self.fake_x509keypair) - self.assertEqual(self.context, x509keypair._context) - - def test_destroy(self): - uuid = self.fake_x509keypair['uuid'] - with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', - autospec=True) as mock_get_x509keypair: - mock_get_x509keypair.return_value = self.fake_x509keypair - with mock.patch.object(self.dbapi, 'destroy_x509keypair', - autospec=True) as mock_destroy_x509keypair: - x509keypair = objects.X509KeyPair.get_by_uuid(self.context, - uuid) - x509keypair.destroy() - mock_get_x509keypair.assert_called_once_with(self.context, - uuid) - mock_destroy_x509keypair.assert_called_once_with(uuid) - self.assertEqual(self.context, x509keypair._context) - - def test_save(self): - uuid = self.fake_x509keypair['uuid'] - with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', - autospec=True) as mock_get_x509keypair: - mock_get_x509keypair.return_value = self.fake_x509keypair - with mock.patch.object(self.dbapi, 'update_x509keypair', - autospec=True) as mock_update_x509keypair: - x509keypair = objects.X509KeyPair.get_by_uuid(self.context, - uuid) - x509keypair.certificate = 'new_certificate' - x509keypair.save() - - mock_get_x509keypair.assert_called_once_with(self.context, - uuid) - mock_update_x509keypair.assert_called_once_with( - uuid, {'certificate': 'new_certificate'}) - self.assertEqual(self.context, x509keypair._context) - - def test_refresh(self): - uuid = self.fake_x509keypair['uuid'] - new_uuid = uuidutils.generate_uuid() - returns = [dict(self.fake_x509keypair, uuid=uuid), - dict(self.fake_x509keypair, uuid=new_uuid)] - expected = [mock.call(self.context, uuid), - mock.call(self.context, uuid)] - with mock.patch.object(self.dbapi, 'get_x509keypair_by_uuid', - side_effect=returns, - autospec=True) as mock_get_x509keypair: - x509keypair = objects.X509KeyPair.get_by_uuid(self.context, uuid) - self.assertEqual(uuid, x509keypair.uuid) - x509keypair.refresh() - self.assertEqual(new_uuid, x509keypair.uuid) - self.assertEqual(expected, mock_get_x509keypair.call_args_list) - self.assertEqual(self.context, x509keypair._context) diff --git a/magnum/tests/unit/objects/utils.py b/magnum/tests/unit/objects/utils.py deleted file mode 100644 index dbf0db52..00000000 --- a/magnum/tests/unit/objects/utils.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2014 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -"""Magnum object test utilities.""" - - -import datetime - -import iso8601 -import netaddr -from oslo_utils import timeutils -import six - -from magnum.common import exception -from magnum.i18n import _ -from magnum import objects -from magnum.tests.unit.db import utils as db_utils - - -def get_test_cluster_template(context, **kw): - """Return a ClusterTemplate object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - db_cluster_template = db_utils.get_test_cluster_template(**kw) - cluster_template = objects.ClusterTemplate(context) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kw: - del db_cluster_template['id'] - - for key in db_cluster_template: - setattr(cluster_template, key, db_cluster_template[key]) - return cluster_template - - -def create_test_cluster_template(context, **kw): - """Create and return a test ClusterTemplate object. - - Create a ClusterTemplate in the DB and return a ClusterTemplate object - with appropriate attributes. - """ - cluster_template = get_test_cluster_template(context, **kw) - try: - cluster_template.create() - except exception.ClusterTemplateAlreadyExists: - cluster_template = objects.ClusterTemplate.get(context, - cluster_template.uuid) - return cluster_template - - -def get_test_cluster(context, **kw): - """Return a Cluster object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - db_cluster = db_utils.get_test_cluster(**kw) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kw: - del db_cluster['id'] - cluster = objects.Cluster(context) - for key in db_cluster: - setattr(cluster, key, db_cluster[key]) - return cluster - - -def create_test_cluster(context, **kw): - """Create and return a test Cluster object. - - Create a Cluster in the DB and return a Cluster object with appropriate - attributes. - """ - cluster = get_test_cluster(context, **kw) - create_test_cluster_template(context, uuid=cluster['cluster_template_id'], - coe=kw.get('coe', 'swarm'), - tls_disabled=kw.get('tls_disabled')) - cluster.create() - return cluster - - -def get_test_quota(context, **kw): - """Return a Quota object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - db_quota = db_utils.get_test_quota(**kw) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kw: - del db_quota['id'] - quota = objects.Quota(context) - for key in db_quota: - setattr(quota, key, db_quota[key]) - return quota - - -def create_test_quota(context, **kw): - """Create and return a test Quota object. - - Create a quota in the DB and return a Quota object with appropriate - attributes. - """ - quota = get_test_quota(context, **kw) - quota.create() - return quota - - -def get_test_x509keypair(context, **kw): - """Return a X509KeyPair object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. 
- """ - db_x509keypair = db_utils.get_test_x509keypair(**kw) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kw: - del db_x509keypair['id'] - x509keypair = objects.X509KeyPair(context) - for key in db_x509keypair: - setattr(x509keypair, key, db_x509keypair[key]) - return x509keypair - - -def create_test_x509keypair(context, **kw): - """Create and return a test x509keypair object. - - Create a x509keypair in the DB and return a X509KeyPair object with - appropriate attributes. - """ - x509keypair = get_test_x509keypair(context, **kw) - x509keypair.create() - return x509keypair - - -def get_test_magnum_service_object(context, **kw): - """Return a test magnum_service object. - - Get a magnum_service from DB layer and return an object with - appropriate attributes. - """ - db_magnum_service = db_utils.get_test_magnum_service(**kw) - magnum_service = objects.MagnumService(context) - for key in db_magnum_service: - setattr(magnum_service, key, db_magnum_service[key]) - return magnum_service - - -def datetime_or_none(dt): - """Validate a datetime or None value.""" - if dt is None: - return None - elif isinstance(dt, datetime.datetime): - if dt.utcoffset() is None: - # NOTE(danms): Legacy objects from sqlalchemy are stored in UTC, - # but are returned without a timezone attached. - # As a transitional aid, assume a tz-naive object is in UTC. - return dt.replace(tzinfo=iso8601.iso8601.Utc()) - else: - return dt - raise ValueError(_("A datetime.datetime is required here")) - - -def datetime_or_str_or_none(val): - if isinstance(val, six.string_types): - return timeutils.parse_isotime(val) - return datetime_or_none(val) - - -def int_or_none(val): - """Attempt to parse an integer value, or None.""" - if val is None: - return val - else: - return int(val) - - -def str_or_none(val): - """Attempt to stringify a value to unicode, or None.""" - if val is None: - return val - else: - return six.text_type(val) - - -def ip_or_none(version): - """Return a version-specific IP address validator.""" - def validator(val, version=version): - if val is None: - return val - else: - return netaddr.IPAddress(val, version=version) - return validator - - -def dt_serializer(name): - """Return a datetime serializer for a named attribute.""" - def serializer(self, name=name): - if getattr(self, name) is not None: - return datetime.datetime.isoformat(getattr(self, name)) - else: - return None - return serializer - - -def dt_deserializer(instance, val): - """A deserializer method for datetime attributes.""" - if val is None: - return None - else: - return timeutils.parse_isotime(val) diff --git a/magnum/tests/unit/service/__init__.py b/magnum/tests/unit/service/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/service/test_periodic.py b/magnum/tests/unit/service/test_periodic.py deleted file mode 100644 index bb87cfa1..00000000 --- a/magnum/tests/unit/service/test_periodic.py +++ /dev/null @@ -1,334 +0,0 @@ -# Copyright 2015 Intel, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from magnum.common import context -from magnum.common.rpc_service import CONF -from magnum.db.sqlalchemy import api as dbapi -from magnum.drivers.common import driver -from magnum import objects -from magnum.objects.fields import ClusterStatus as cluster_status -from magnum.service import periodic -from magnum.tests import base -from magnum.tests import fake_notifier -from magnum.tests import fakes -from magnum.tests.unit.db import utils - - -class fake_stack(object): - def __init__(self, **kw): - for key, val in kw.items(): - setattr(self, key, val) - - -class PeriodicTestCase(base.TestCase): - - def setUp(self): - super(PeriodicTestCase, self).setUp() - - self.context = context.make_admin_context() - - # Can be identical for all clusters. - trust_attrs = { - 'trustee_username': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52', - 'trustee_password': 'ain7einaebooVaig6d', - 'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f', - } - - trust_attrs.update({'id': 1, 'stack_id': '11', - 'status': cluster_status.CREATE_IN_PROGRESS, - 'status_reason': 'no change'}) - cluster1 = utils.get_test_cluster(**trust_attrs) - trust_attrs.update({'id': 2, 'stack_id': '22', - 'status': cluster_status.DELETE_IN_PROGRESS, - 'status_reason': 'no change'}) - cluster2 = utils.get_test_cluster(**trust_attrs) - trust_attrs.update({'id': 3, 'stack_id': '33', - 'status': cluster_status.UPDATE_IN_PROGRESS, - 'status_reason': 'no change'}) - cluster3 = utils.get_test_cluster(**trust_attrs) - trust_attrs.update({'id': 4, 'stack_id': '44', - 'status': cluster_status.DELETE_IN_PROGRESS, - 'status_reason': 'no change'}) - cluster4 = utils.get_test_cluster(**trust_attrs) - trust_attrs.update({'id': 5, 'stack_id': '55', - 'status': cluster_status.ROLLBACK_IN_PROGRESS, - 'status_reason': 'no change'}) - cluster5 = utils.get_test_cluster(**trust_attrs) - - self.cluster1 = objects.Cluster(self.context, **cluster1) - self.cluster2 = objects.Cluster(self.context, **cluster2) - self.cluster3 = objects.Cluster(self.context, **cluster3) - self.cluster4 = objects.Cluster(self.context, **cluster4) - self.cluster5 = objects.Cluster(self.context, **cluster5) - - # these tests are based on the basic behavior of our standard - # Heat-based drivers, but drivers based on other orchestration - # methods should generally behave in a similar fashion as far - # as the actual calls go. 
It is up to the driver implementor - # to ensure their implementation of update_cluster_status behaves - # as expected regardless of how the periodic updater task works - self.mock_heat_client = mock.MagicMock() - self.stack1 = fake_stack( - id='11', stack_status=cluster_status.CREATE_COMPLETE, - stack_status_reason='fake_reason_11') - self.stack2 = fake_stack( - id='22', stack_status=cluster_status.DELETE_IN_PROGRESS, - stack_status_reason='fake_reason_11') - self.stack3 = fake_stack( - id='33', stack_status=cluster_status.UPDATE_COMPLETE, - stack_status_reason='fake_reason_33') - self.stack5 = fake_stack( - id='55', stack_status=cluster_status.ROLLBACK_COMPLETE, - stack_status_reason='fake_reason_55') - self.mock_heat_client.stacks.list.return_value = [ - self.stack1, self.stack2, self.stack3, self.stack5] - - self.get_stacks = { - '11': self.stack1, - '22': self.stack2, - '33': self.stack3, - '55': self.stack5 - } - - self.mock_driver = mock.MagicMock(spec=driver.Driver) - - def _mock_update_status(context, cluster): - try: - stack = self.get_stacks[cluster.stack_id] - except KeyError: - cluster.status_reason = "Stack %s not found" % cluster.stack_id - if cluster.status == "DELETE_IN_PROGRESS": - cluster.status = cluster_status.DELETE_COMPLETE - else: - cluster.status = cluster.status.replace("IN_PROGRESS", - "FAILED") - cluster.status = cluster.status.replace("COMPLETE", - "FAILED") - else: - if cluster.status != stack.stack_status: - cluster.status = stack.stack_status - cluster.status_reason = stack.stack_status_reason - - self.mock_driver.update_cluster_status.side_effect = ( - _mock_update_status) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=fakes.FakeLoopingCall) - @mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster') - @mock.patch('magnum.objects.Cluster.list') - @mock.patch.object(dbapi.Connection, 'destroy_cluster') - def test_sync_cluster_status_changes(self, mock_db_destroy, - mock_cluster_list, mock_get_driver): - - mock_cluster_list.return_value = [self.cluster1, self.cluster2, - self.cluster3, self.cluster4, - self.cluster5] - mock_get_driver.return_value = self.mock_driver - - periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None) - - self.assertEqual(cluster_status.CREATE_COMPLETE, self.cluster1.status) - self.assertEqual('fake_reason_11', self.cluster1.status_reason) - # make sure cluster 2 didn't change - self.assertEqual(cluster_status.DELETE_IN_PROGRESS, - self.cluster2.status) - self.assertEqual('no change', self.cluster2.status_reason) - self.assertEqual(cluster_status.UPDATE_COMPLETE, self.cluster3.status) - self.assertEqual('fake_reason_33', self.cluster3.status_reason) - mock_db_destroy.assert_called_once_with(self.cluster4.uuid) - self.assertEqual(cluster_status.ROLLBACK_COMPLETE, - self.cluster5.status) - self.assertEqual('fake_reason_55', self.cluster5.status_reason) - notifications = fake_notifier.NOTIFICATIONS - self.assertEqual(4, len(notifications)) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=fakes.FakeLoopingCall) - @mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster') - @mock.patch('magnum.objects.Cluster.list') - def test_sync_cluster_status_not_changes(self, mock_cluster_list, - mock_get_driver): - - self.stack1.stack_status = self.cluster1.status - self.stack2.stack_status = self.cluster2.status - self.stack3.stack_status = self.cluster3.status - self.stack5.stack_status = self.cluster5.status - mock_cluster_list.return_value = [self.cluster1, 
self.cluster2, - self.cluster3, self.cluster5] - mock_get_driver.return_value = self.mock_driver - periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None) - - self.assertEqual(cluster_status.CREATE_IN_PROGRESS, - self.cluster1.status) - self.assertEqual('no change', self.cluster1.status_reason) - self.assertEqual(cluster_status.DELETE_IN_PROGRESS, - self.cluster2.status) - self.assertEqual('no change', self.cluster2.status_reason) - self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, - self.cluster3.status) - self.assertEqual('no change', self.cluster3.status_reason) - self.assertEqual(cluster_status.ROLLBACK_IN_PROGRESS, - self.cluster5.status) - self.assertEqual('no change', self.cluster5.status_reason) - notifications = fake_notifier.NOTIFICATIONS - self.assertEqual(0, len(notifications)) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=fakes.FakeLoopingCall) - @mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster') - @mock.patch('magnum.objects.Cluster.list') - @mock.patch.object(dbapi.Connection, 'destroy_cluster') - def test_sync_cluster_status_heat_not_found(self, mock_db_destroy, - mock_cluster_list, - mock_get_driver): - self.get_stacks.clear() - mock_get_driver.return_value = self.mock_driver - mock_cluster_list.return_value = [self.cluster1, self.cluster2, - self.cluster3, self.cluster4, - self.cluster5] - - periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None) - - self.assertEqual(cluster_status.CREATE_FAILED, self.cluster1.status) - self.assertEqual('Stack 11 not found', self.cluster1.status_reason) - self.assertEqual(cluster_status.UPDATE_FAILED, self.cluster3.status) - self.assertEqual('Stack 33 not found', self.cluster3.status_reason) - self.assertEqual(cluster_status.ROLLBACK_FAILED, self.cluster5.status) - self.assertEqual('Stack 55 not found', self.cluster5.status_reason) - mock_db_destroy.assert_has_calls([ - mock.call(self.cluster2.uuid), - mock.call(self.cluster4.uuid) - ]) - self.assertEqual(2, mock_db_destroy.call_count) - notifications = fake_notifier.NOTIFICATIONS - self.assertEqual(5, len(notifications)) - - @mock.patch('magnum.conductor.monitors.create_monitor') - @mock.patch('magnum.objects.Cluster.list') - @mock.patch('magnum.common.rpc.get_notifier') - @mock.patch('magnum.common.context.make_admin_context') - def test_send_cluster_metrics(self, mock_make_admin_context, - mock_get_notifier, mock_cluster_list, - mock_create_monitor): - """Test if RPC notifier receives the expected message""" - mock_make_admin_context.return_value = self.context - notifier = mock.MagicMock() - mock_get_notifier.return_value = notifier - mock_cluster_list.return_value = [self.cluster1, self.cluster2, - self.cluster3, self.cluster4] - self.cluster4.status = cluster_status.CREATE_COMPLETE - monitor = mock.MagicMock() - monitor.get_metric_names.return_value = ['metric1', 'metric2'] - monitor.compute_metric_value.return_value = 30 - monitor.get_metric_unit.return_value = '%' - mock_create_monitor.return_value = monitor - - periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context) - - expected_event_type = 'magnum.cluster.metrics.update' - expected_metrics = [ - { - 'name': 'metric1', - 'value': 30, - 'unit': '%', - }, - { - 'name': 'metric2', - 'value': 30, - 'unit': '%', - }, - ] - expected_msg = { - 'user_id': self.cluster4.user_id, - 'project_id': self.cluster4.project_id, - 'resource_id': self.cluster4.uuid, - 'metrics': expected_metrics - } - - self.assertEqual(1, mock_create_monitor.call_count) - 
notifier.info.assert_called_once_with( - self.context, expected_event_type, expected_msg) - - @mock.patch('magnum.conductor.monitors.create_monitor') - @mock.patch('magnum.objects.Cluster.list') - @mock.patch('magnum.common.rpc.get_notifier') - @mock.patch('magnum.common.context.make_admin_context') - def test_send_cluster_metrics_compute_metric_raise( - self, mock_make_admin_context, mock_get_notifier, - mock_cluster_list, mock_create_monitor): - mock_make_admin_context.return_value = self.context - notifier = mock.MagicMock() - mock_get_notifier.return_value = notifier - mock_cluster_list.return_value = [self.cluster4] - self.cluster4.status = cluster_status.CREATE_COMPLETE - monitor = mock.MagicMock() - monitor.get_metric_names.return_value = ['metric1', 'metric2'] - monitor.compute_metric_value.side_effect = Exception( - "error on computing metric") - mock_create_monitor.return_value = monitor - - periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context) - - expected_event_type = 'magnum.cluster.metrics.update' - expected_msg = { - 'user_id': self.cluster4.user_id, - 'project_id': self.cluster4.project_id, - 'resource_id': self.cluster4.uuid, - 'metrics': [] - } - self.assertEqual(1, mock_create_monitor.call_count) - notifier.info.assert_called_once_with( - self.context, expected_event_type, expected_msg) - - @mock.patch('magnum.conductor.monitors.create_monitor') - @mock.patch('magnum.objects.Cluster.list') - @mock.patch('magnum.common.rpc.get_notifier') - @mock.patch('magnum.common.context.make_admin_context') - def test_send_cluster_metrics_pull_data_raise( - self, mock_make_admin_context, mock_get_notifier, - mock_cluster_list, mock_create_monitor): - mock_make_admin_context.return_value = self.context - notifier = mock.MagicMock() - mock_get_notifier.return_value = notifier - mock_cluster_list.return_value = [self.cluster4] - self.cluster4.status = cluster_status.CREATE_COMPLETE - monitor = mock.MagicMock() - monitor.pull_data.side_effect = Exception("error on pulling data") - mock_create_monitor.return_value = monitor - - periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context) - - self.assertEqual(1, mock_create_monitor.call_count) - self.assertEqual(0, notifier.info.call_count) - - @mock.patch('magnum.conductor.monitors.create_monitor') - @mock.patch('magnum.objects.Cluster.list') - @mock.patch('magnum.common.rpc.get_notifier') - @mock.patch('magnum.common.context.make_admin_context') - def test_send_cluster_metrics_monitor_none( - self, mock_make_admin_context, mock_get_notifier, - mock_cluster_list, mock_create_monitor): - mock_make_admin_context.return_value = self.context - notifier = mock.MagicMock() - mock_get_notifier.return_value = notifier - mock_cluster_list.return_value = [self.cluster4] - self.cluster4.status = cluster_status.CREATE_COMPLETE - mock_create_monitor.return_value = None - - periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context) - - self.assertEqual(1, mock_create_monitor.call_count) - self.assertEqual(0, notifier.info.call_count) diff --git a/magnum/tests/unit/servicegroup/__init__.py b/magnum/tests/unit/servicegroup/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/servicegroup/test_magnum_service.py b/magnum/tests/unit/servicegroup/test_magnum_service.py deleted file mode 100644 index 161603ad..00000000 --- a/magnum/tests/unit/servicegroup/test_magnum_service.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2015 - Yahoo! Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from magnum.common.rpc_service import CONF -from magnum import objects -from magnum.servicegroup import magnum_service_periodic as periodic -from magnum.tests import base - - -class MagnumServicePeriodicTestCase(base.TestCase): - - def setUp(self): - super(MagnumServicePeriodicTestCase, self).setUp() - mock_magnum_service_refresh = mock.Mock() - - class FakeMS(object): - report_state_up = mock_magnum_service_refresh - - self.fake_ms = FakeMS() - self.fake_ms_refresh = mock_magnum_service_refresh - - @mock.patch.object(objects.MagnumService, 'get_by_host_and_binary') - @mock.patch.object(objects.MagnumService, 'create') - @mock.patch.object(objects.MagnumService, 'report_state_up') - def test_update_magnum_service_firsttime(self, - mock_ms_refresh, - mock_ms_create, - mock_ms_get - ): - p_task = periodic.MagnumServicePeriodicTasks(CONF, - 'fake-conductor') - mock_ms_get.return_value = None - - p_task.update_magnum_service(None) - - mock_ms_get.assert_called_once_with(mock.ANY, p_task.host, - p_task.binary) - mock_ms_create.assert_called_once_with() - mock_ms_refresh.assert_called_once_with() - - @mock.patch.object(objects.MagnumService, 'get_by_host_and_binary') - @mock.patch.object(objects.MagnumService, 'create') - def test_update_magnum_service_on_restart(self, - mock_ms_create, - mock_ms_get): - p_task = periodic.MagnumServicePeriodicTasks(CONF, - 'fake-conductor') - mock_ms_get.return_value = self.fake_ms - - p_task.update_magnum_service(None) - - mock_ms_get.assert_called_once_with(mock.ANY, p_task.host, - p_task.binary) - self.fake_ms_refresh.assert_called_once_with() - - def test_update_magnum_service_regular(self): - p_task = periodic.MagnumServicePeriodicTasks(CONF, - 'fake-conductor') - p_task.magnum_service_ref = self.fake_ms - - p_task.update_magnum_service(None) - - self.fake_ms_refresh.assert_called_once_with() diff --git a/magnum/tests/unit/template/__init__.py b/magnum/tests/unit/template/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/magnum/tests/unit/template/test_template.py b/magnum/tests/unit/template/test_template.py deleted file mode 100644 index f7ed985c..00000000 --- a/magnum/tests/unit/template/test_template.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2015 Intel, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
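One note on test_template.py below: it calls yaml.load() with no explicit Loader, a form PyYAML has deprecated since 5.1 because it can construct arbitrary Python objects. A sketch of the same directory walk using yaml.safe_load() instead; the 'templates' default is a hypothetical stand-in for cfg.CONF.template_path:

import os
from glob import glob

import yaml


def validate_templates(template_path='templates'):
    """Fail on the first template that is not well-formed YAML."""
    for root, _dirs, _files in os.walk(template_path):
        for yml in glob(os.path.join(root, '*.yaml')):
            with open(yml) as f:
                try:
                    # safe_load() parses the document without
                    # instantiating arbitrary Python objects.
                    yaml.safe_load(f)
                except yaml.YAMLError as exc:
                    raise AssertionError('file: %s: %s' % (yml, exc))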
-import os -import sys - -from glob import glob -from oslo_config import cfg -from yaml import load - -from magnum.conf import paths -from magnum.tests import base - -cfg.CONF.register_opts([cfg.StrOpt('template_path', - default=paths.basedir_def('templates'), - help='Heat template path')]) - - -class TestTemplate(base.TestCase): - - def test_template_yaml(self): - for yml in [y for x in os.walk(cfg.CONF.template_path) - for y in glob(os.path.join(x[0], '*.yaml'))]: - with open(yml, 'r') as f: - yml_contents = f.read() - try: - load(yml_contents) - except Exception: - error_msg = "file: %s: %s" % (yml, sys.exc_info()[1]) - self.fail(error_msg) diff --git a/magnum/tests/unit/test_hacking.py b/magnum/tests/unit/test_hacking.py deleted file mode 100644 index 3a877340..00000000 --- a/magnum/tests/unit/test_hacking.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright 2015 Intel, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import textwrap - -import mock -import pep8 - -from magnum.hacking import checks -from magnum.tests import base - - -class HackingTestCase(base.TestCase): - """Hacking test class. - - This class tests the hacking checks in magnum.hacking.checks by passing - strings to the check methods like the pep8/flake8 parser would. The parser - loops over each line in the file and then passes the parameters to the - check method. The parameter names in the check method dictate what type of - object is passed to the check method. The parameter types are:: - - logical_line: A processed line with the following modifications: - - Multi-line statements converted to a single line. - - Stripped left and right. - - Contents of strings replaced with "xxx" of same length. - - Comments removed. - physical_line: Raw line of text from the input file. - lines: a list of the raw lines from the input file - tokens: the tokens that contribute to this logical line - line_number: line number in the input file - total_lines: number of lines in the input file - blank_lines: blank lines before this one - indent_char: indentation character in this file (" " or "\t") - indent_level: indentation (with tabs expanded to multiples of 8) - previous_indent_level: indentation on previous line - previous_logical: previous logical line - filename: Path of the file being run through pep8 - - When running a test on a check method the return will be False/None if - there is no violation in the sample input. If there is an error a tuple is - returned with a position in the line, and a message. So to check the result - just assertTrue if the check is expected to fail and assertFalse if it - should pass. - """ - # We are patching pep8 so that only the check under test is actually - # installed. 
-
-    @mock.patch('pep8._checks',
-                {'physical_line': {}, 'logical_line': {}, 'tree': {}})
-    def _run_check(self, code, checker, filename=None):
-        pep8.register_check(checker)
-
-        lines = textwrap.dedent(code).strip().splitlines(True)
-
-        checker = pep8.Checker(filename=filename, lines=lines)
-        checker.check_all()
-        checker.report._deferred_print.sort()
-        return checker.report._deferred_print
-
-    def _assert_has_errors(self, code, checker, expected_errors=None,
-                           filename=None):
-        actual_errors = [e[:3] for e in
-                         self._run_check(code, checker, filename)]
-        self.assertEqual(expected_errors or [], actual_errors)
-
-    def _assert_has_no_errors(self, code, checker, filename=None):
-        self._assert_has_errors(code, checker, filename=filename)
-
-    def test_assert_equal_in(self):
-        errors = [(1, 0, "M338")]
-        check = checks.assert_equal_in
-
-        code = "self.assertEqual(a in b, True)"
-        self._assert_has_errors(code, check, errors)
-
-        code = "self.assertEqual('str' in 'string', True)"
-        self._assert_has_errors(code, check, errors)
-
-        code = "self.assertEqual(any(a==1 for a in b), True)"
-        self._assert_has_no_errors(code, check)
-
-        code = "self.assertEqual(True, a in b)"
-        self._assert_has_errors(code, check, errors)
-
-        code = "self.assertEqual(True, 'str' in 'string')"
-        self._assert_has_errors(code, check, errors)
-
-        code = "self.assertEqual(True, any(a==1 for a in b))"
-        self._assert_has_no_errors(code, check)
-
-        code = "self.assertEqual(a in b, False)"
-        self._assert_has_errors(code, check, errors)
-
-        code = "self.assertEqual('str' in 'string', False)"
-        self._assert_has_errors(code, check, errors)
-
-        code = "self.assertEqual(any(a==1 for a in b), False)"
-        self._assert_has_no_errors(code, check)
-
-        code = "self.assertEqual(False, a in b)"
-        self._assert_has_errors(code, check, errors)
-
-        code = "self.assertEqual(False, 'str' in 'string')"
-        self._assert_has_errors(code, check, errors)
-
-        code = "self.assertEqual(False, any(a==1 for a in b))"
-        self._assert_has_no_errors(code, check)
-
-    def test_no_mutable_default_args(self):
-        errors = [(1, 0, "M322")]
-        check = checks.no_mutable_default_args
-
-        code = "def get_info_from_bdm(virt_type, bdm, mapping=[])"
-        self._assert_has_errors(code, check, errors)
-
-        code = "defined = []"
-        self._assert_has_no_errors(code, check)
-
-        code = "defined, undefined = [], {}"
-        self._assert_has_no_errors(code, check)
-
-    def test_assert_is_not_none(self):
-        errors = [(1, 0, "M302")]
-        check = checks.assert_equal_not_none
-
-        code = "self.assertEqual(A is not None)"
-        self._assert_has_errors(code, check, errors)
-
-        code = "self.assertIsNotNone()"
-        self._assert_has_no_errors(code, check)
-
-    def test_assert_true_isinstance(self):
-        errors = [(1, 0, "M316")]
-        check = checks.assert_true_isinstance
-
-        code = "self.assertTrue(isinstance(e, exception.BuildAbortException))"
-        self._assert_has_errors(code, check, errors)
-
-        code = "self.assertTrue()"
-        self._assert_has_no_errors(code, check)
-
-    def test_no_xrange(self):
-        errors = [(1, 0, "M339")]
-        check = checks.no_xrange
-
-        code = "xrange(45)"
-        self._assert_has_errors(code, check, errors)
-
-        code = "range(45)"
-        self._assert_has_no_errors(code, check)
-
-    def test_no_log_warn(self):
-        errors = [(1, 0, "M352")]
-        check = checks.no_log_warn
-        code = """
-                  LOG.warn("LOG.warn is deprecated")
-               """
-        self._assert_has_errors(code, check, errors)
-
-        code = """
-                  LOG.warning("LOG.warn is deprecated")
-               """
-        self._assert_has_no_errors(code, check)
-
-    def test_use_timeutils_utcnow(self):
-        errors = [(1, 0,
"M310")] - check = checks.use_timeutils_utcnow - - code = "datetime.now" - self._assert_has_errors(code, check, errors) - - code = "datetime.utcnow" - self._assert_has_errors(code, check, errors) - - code = "datetime.aa" - self._assert_has_no_errors(code, check) - - code = "aaa" - self._assert_has_no_errors(code, check) - - def test_dict_constructor_with_list_copy(self): - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dict([(i, connect_info[i])")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " attrs = dict([(k, _from_json(v))")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " type_names = dict((value, key) for key, value in")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dict((value, key) for key, value in")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - "foo(param=dict((k, v) for k, v in bar.items()))")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dict([[i,i] for i in range(3)])")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dd = dict([i,i] for i in range(3))")))) - - self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( - " create_kwargs = dict(snapshot=snapshot,")))) - - self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( - " self._render_dict(xml, data_el, data.__dict__)")))) - - def test_check_explicit_underscore_import(self): - self.assertEqual(len(list(checks.check_explicit_underscore_import( - "LOG.info(_('My info message'))", - "magnum/tests/other_files.py"))), 1) - self.assertEqual(len(list(checks.check_explicit_underscore_import( - "msg = _('My message')", - "magnum/tests/other_files.py"))), 1) - self.assertEqual(len(list(checks.check_explicit_underscore_import( - "from magnum.i18n import _", - "magnum/tests/other_files.py"))), 0) - self.assertEqual(len(list(checks.check_explicit_underscore_import( - "LOG.info(_('My info message'))", - "magnum/tests/other_files.py"))), 0) - self.assertEqual(len(list(checks.check_explicit_underscore_import( - "msg = _('My message')", - "magnum/tests/other_files.py"))), 0) - self.assertEqual(len(list(checks.check_explicit_underscore_import( - "from magnum.i18n import _, _LW", - "magnum/tests/other_files2.py"))), 0) - self.assertEqual(len(list(checks.check_explicit_underscore_import( - "msg = _('My message')", - "magnum/tests/other_files2.py"))), 0) - self.assertEqual(len(list(checks.check_explicit_underscore_import( - "_ = translations.ugettext", - "magnum/tests/other_files3.py"))), 0) - self.assertEqual(len(list(checks.check_explicit_underscore_import( - "msg = _('My message')", - "magnum/tests/other_files3.py"))), 0) diff --git a/magnum/tests/utils.py b/magnum/tests/utils.py deleted file mode 100644 index 3dbb77b5..00000000 --- a/magnum/tests/utils.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2013 - Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-from magnum.common import context as magnum_context
-
-
-def dummy_context(user='test_username', project_id='test_tenant_id'):
-    return magnum_context.RequestContext(user=user, project_id=project_id)
diff --git a/magnum/version.py b/magnum/version.py
deleted file mode 100644
index 26e6462b..00000000
--- a/magnum/version.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2013 - Noorul Islam K M
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pbr.version
-
-
-version_info = pbr.version.VersionInfo('magnum')
-version_string = version_info.version_string
diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder
deleted file mode 100644
index e69de29b..00000000
diff --git a/releasenotes/notes/CVE-2016-7404-f53e62a4a40e4d30.yaml b/releasenotes/notes/CVE-2016-7404-f53e62a4a40e4d30.yaml
deleted file mode 100644
index 2edb4102..00000000
--- a/releasenotes/notes/CVE-2016-7404-f53e62a4a40e4d30.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-upgrade:
-  - |
-    To let clusters communicate directly with OpenStack services other than
-    Magnum, set `cluster_user_trust` to True in the `trust` section of
-    magnum.conf. The default value is False.
-security:
-  - |
-    Every magnum cluster is assigned a trustee user and a trustID. This user
-    allows the cluster to communicate with the key-manager service (Barbican)
-    and fetch the cluster's certificate authority. The trust user can also be
-    used to let the cluster authenticate with other OpenStack services, such
-    as the Block Storage service, the Object Storage service, or Load
-    Balancing. The cluster with this user and the trustID has full access to
-    the trustor's OpenStack project. A new configuration parameter has been
-    added to restrict access to services other than Magnum.
-fixes:
-  - |
-    Fixes CVE-2016-7404 for newly created clusters. Existing clusters will
-    have to be re-created to benefit from this fix. Part of this fix is the
-    newly introduced setting `cluster_user_trust` in the `trust` section of
-    magnum.conf. This setting defaults to False. `cluster_user_trust`
-    dictates whether to allow passing a trust ID into a cluster's instances.
-    Most clusters do not need this capability; clusters with
-    `registry_enabled=True` or `volume_driver=rexray` do. Other features
-    that require this capability may be introduced in the future. To be able
-    to create such clusters you will need to set `cluster_user_trust` to
-    True.
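For operators, this upgrade boils down to one option in magnum.conf. A
minimal sketch, assuming your clusters actually need the trust ID (e.g.
`registry_enabled=True` or `volume_driver=rexray`)::

    [trust]
    # Opt in to passing the trust ID into cluster instances.
    cluster_user_trust = True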
diff --git a/releasenotes/notes/add-docker-storage-driver-to-baymodel-1ed9ba8d43ecfea1.yaml b/releasenotes/notes/add-docker-storage-driver-to-baymodel-1ed9ba8d43ecfea1.yaml
deleted file mode 100644
index 156563ab..00000000
--- a/releasenotes/notes/add-docker-storage-driver-to-baymodel-1ed9ba8d43ecfea1.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-features:
-  - Add a docker-storage-driver parameter to baymodel so users can select
-    from the supported drivers. Until now, only devicemapper was supported.
-    This release adds support for OverlayFS on Fedora Atomic hosts with
-    kernel version >= 3.18 (Fedora 22 or higher), resulting in a significant
-    performance improvement. To use OverlayFS, SELinux must be enabled and
-    in enforcing mode on the physical machine, but must be disabled in the
-    container. Thus, if you select overlay as the docker-storage-driver,
-    SELinux will be disabled inside the containers.
diff --git a/releasenotes/notes/add-hostgw-backend-option-1d1f9d8d95ec374f.yaml b/releasenotes/notes/add-hostgw-backend-option-1d1f9d8d95ec374f.yaml
deleted file mode 100644
index fd5fb939..00000000
--- a/releasenotes/notes/add-hostgw-backend-option-1d1f9d8d95ec374f.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-features:
-  - Add flannel's host-gw backend option. Magnum deploys a cluster over a
-    dedicated neutron private network by using flannel. Flannel's host-gw
-    backend gives the best performance in this topology (private layer 2),
-    since there is no packet-processing overhead and no reduction of the
-    MTU, and it scales to as many hosts as the alternatives. The label
-    "flannel_use_vxlan" was repurposed for the flannel network driver.
-    First, the label flannel_use_vxlan was renamed to flannel_backend.
-    Second, the value of this label was redefined from "yes/no" to
-    "udp/vxlan/host-gw".
diff --git a/releasenotes/notes/add-opensuse-driver-f69b6d346ca82b87.yaml b/releasenotes/notes/add-opensuse-driver-f69b6d346ca82b87.yaml
deleted file mode 100644
index 4f31d044..00000000
--- a/releasenotes/notes/add-opensuse-driver-f69b6d346ca82b87.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Add support for a new OpenSUSE driver for running k8s clusters on
-    OpenSUSE. This driver is experimental for now, and operators need to
-    get it from the /contrib folder.
diff --git a/releasenotes/notes/add-overlay-networks-to-swarm-4467986d7853fcd8.yaml b/releasenotes/notes/add-overlay-networks-to-swarm-4467986d7853fcd8.yaml
deleted file mode 100644
index f14f85c1..00000000
--- a/releasenotes/notes/add-overlay-networks-to-swarm-4467986d7853fcd8.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-features:
-  - Add configuration for overlay networks for the docker network driver
-    in swarm. To use this feature, users need to create a swarm cluster
-    with network_driver set to 'docker'. After the cluster is created,
-    users can create an overlay network (docker network create -d overlay
-    mynetwork) and use it when launching a new container (docker run
-    --net=mynetwork ...).
-
diff --git a/releasenotes/notes/async-bay-operations-support-9819bd06122ea9e5.yaml b/releasenotes/notes/async-bay-operations-support-9819bd06122ea9e5.yaml
deleted file mode 100644
index e380098c..00000000
--- a/releasenotes/notes/async-bay-operations-support-9819bd06122ea9e5.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-features:
-  - The current implementation of magnum bay operations is synchronous, so
-    API requests are blocked until a response from the Heat service is
-    received. This release adds support for asynchronous bay operations
-    (bay-create, bay-update, and bay-delete). Please note that with this
-    change, bay-create and bay-update API calls will return the bay uuid
-    instead of the bay object, and will return HTTP status code 202
-    instead of 201. Microversion 1.2 is added for the new behavior.
-
-upgrade:
-  - The default behavior of the magnum bay operations API changed from
-    synchronous to asynchronous. Users can specify OpenStack-API-Version
-    1.1 in the request header for synchronous bay operations.
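For illustration, opting back into the synchronous behavior is just a
request header. A sketch (the endpoint, token and payload are placeholders;
the 'container-infra <version>' value follows the microversion convention
used in the rollback note further down)::

    curl -X POST "$MAGNUM_URL/v1/bays" \
        -H "X-Auth-Token: $TOKEN" \
        -H "Content-Type: application/json" \
        -H "OpenStack-API-Version: container-infra 1.1" \
        -d '{"name": "demo-bay", "baymodel_id": "<uuid>", "node_count": 1}'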
diff --git a/releasenotes/notes/bp-add-kube-dashboard-8a9f7d7c73c2debd.yaml b/releasenotes/notes/bp-add-kube-dashboard-8a9f7d7c73c2debd.yaml
deleted file mode 100644
index 5e21a46a..00000000
--- a/releasenotes/notes/bp-add-kube-dashboard-8a9f7d7c73c2debd.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - |
-    Include the kubernetes dashboard in kubernetes clusters by default.
-    Users can use the dashboard to manage the kubernetes cluster. The
-    dashboard can be disabled by setting the label 'kube_dashboard_enabled'
-    to false.
diff --git a/releasenotes/notes/bp-auto-generate-name-052ea3fdf05fdbbf.yaml b/releasenotes/notes/bp-auto-generate-name-052ea3fdf05fdbbf.yaml
deleted file mode 100644
index 070b8cb6..00000000
--- a/releasenotes/notes/bp-auto-generate-name-052ea3fdf05fdbbf.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Auto-generate names for clusters and cluster-templates. If users create
-    a cluster/cluster-template without specifying a name, the name will be
-    auto-generated.
diff --git a/releasenotes/notes/bp-barbican-alternative-store-35ec3eda0abb0e25.yaml b/releasenotes/notes/bp-barbican-alternative-store-35ec3eda0abb0e25.yaml
deleted file mode 100644
index 5f75f8fd..00000000
--- a/releasenotes/notes/bp-barbican-alternative-store-35ec3eda0abb0e25.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - Decouple the hard requirement on barbican. Introduce a new certificate
-    store called x509keypair. If x509keypair is used, TLS certificates will
-    be stored in magnum's database instead of barbican. To do that, set the
-    value of the config ``cert_manager_type`` to ``x509keypair``.
diff --git a/releasenotes/notes/bp-container-monitoring-d4bb1cbd0a4e44cc.yaml b/releasenotes/notes/bp-container-monitoring-d4bb1cbd0a4e44cc.yaml
deleted file mode 100644
index b6b00641..00000000
--- a/releasenotes/notes/bp-container-monitoring-d4bb1cbd0a4e44cc.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - |
-    Includes a monitoring stack based on cAdvisor, node-exporter, Prometheus
-    and Grafana. Users can enable this stack through the label
-    prometheus_monitoring. Prometheus scrapes metrics from the Kubernetes
-    cluster and then serves them to Grafana through Grafana's Prometheus
-    data source. Upon completion, a default Grafana dashboard is provided.
diff --git a/releasenotes/notes/bp-decouple-lbaas-c8f2d73313c40b98.yaml b/releasenotes/notes/bp-decouple-lbaas-c8f2d73313c40b98.yaml
deleted file mode 100644
index 6d68725f..00000000
--- a/releasenotes/notes/bp-decouple-lbaas-c8f2d73313c40b98.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - Decouple the hard requirement on neutron-lbaas. Introduce a new property
-    master_lb_enabled in the cluster template. This property determines
-    whether a cluster's master nodes should be load balanced. Set the value
-    to false if neutron-lbaas is not installed.
diff --git a/releasenotes/notes/bp-keypair-override-on-create-ca8f12ffca41cd62.yaml b/releasenotes/notes/bp-keypair-override-on-create-ca8f12ffca41cd62.yaml
deleted file mode 100644
index ba3c3920..00000000
--- a/releasenotes/notes/bp-keypair-override-on-create-ca8f12ffca41cd62.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-prelude: >
-    Magnum's keypair-override-on-create blueprint [1]
-    allows for an optional keypair value in ClusterTemplates
-    and the ability to specify a keypair value during
-    cluster creation.
-features:
-  - Added a parameter in cluster-create to specify the
-    keypair. If a keypair is not provided, the default
-    value from the matching ClusterTemplate will be used.
-  - Keypair is now optional for ClusterTemplate, in order
-    to allow Clusters to use keypairs separate from their
-    parent ClusterTemplate.
-deprecations:
-  - The --keypair-id parameter in the magnum CLI
-    cluster-template-create has been renamed to
-    --keypair.
diff --git a/releasenotes/notes/bp-magnum-notifications-8bd44cfe9e80f82b.yaml b/releasenotes/notes/bp-magnum-notifications-8bd44cfe9e80f82b.yaml
deleted file mode 100644
index 5b9da7c0..00000000
--- a/releasenotes/notes/bp-magnum-notifications-8bd44cfe9e80f82b.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - Emit notifications when there is an event on a cluster. An event could be
-    a status change of the cluster due to an operation issued by end-users
-    (e.g. users create, update or delete the cluster). Notifications are sent
-    using oslo.notify and PyCADF. Ceilometer can capture the events and
-    generate samples for auditing, billing, monitoring, or quota purposes.
diff --git a/releasenotes/notes/bp-mesos-slave-flags-de6cf8c4d2c3c916.yaml b/releasenotes/notes/bp-mesos-slave-flags-de6cf8c4d2c3c916.yaml
deleted file mode 100644
index 81510e9e..00000000
--- a/releasenotes/notes/bp-mesos-slave-flags-de6cf8c4d2c3c916.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - >
-    Enable Mesos clusters to export more slave flags via labels in the
-    cluster template. Add the following labels: mesos_slave_isolation,
-    mesos_slave_image_providers, mesos_slave_work_dir,
-    and mesos_slave_executor_environment_variables.
diff --git a/releasenotes/notes/bp-secure-etcd-cluster-coe-5abd22546f05a85b.yaml b/releasenotes/notes/bp-secure-etcd-cluster-coe-5abd22546f05a85b.yaml
deleted file mode 100644
index ecc30ad8..00000000
--- a/releasenotes/notes/bp-secure-etcd-cluster-coe-5abd22546f05a85b.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    Secure the etcd cluster for swarm and k8s. The etcd cluster is
-    secured using TLS by default. TLS can be disabled by
-    passing --tls-disabled during cluster template creation.
diff --git a/releasenotes/notes/bug-1614596-support-ssl-magnum-api-e4896928c6562e03.yaml b/releasenotes/notes/bug-1614596-support-ssl-magnum-api-e4896928c6562e03.yaml
deleted file mode 100644
index 456f2ba5..00000000
--- a/releasenotes/notes/bug-1614596-support-ssl-magnum-api-e4896928c6562e03.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-upgrade:
-  - Magnum now supports SSL for the API service. Users can enable SSL for
-    the API via three new config options: 'enabled_ssl', 'ssl_cert_file'
-    and 'ssl_key_file'.
-  - Changed the default API development service from wsgiref simple_server
-    to werkzeug to better support SSL.
diff --git a/releasenotes/notes/bug-1697655-add-etcd-volume-size-label-abde0060595bbbeb.yaml b/releasenotes/notes/bug-1697655-add-etcd-volume-size-label-abde0060595bbbeb.yaml
deleted file mode 100644
index 2c0101ec..00000000
--- a/releasenotes/notes/bug-1697655-add-etcd-volume-size-label-abde0060595bbbeb.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - |
-    Add support to store the etcd configuration in a cinder volume.
-    k8s_fedora_atomic accepts a new label etcd_volume_size defining the size
-    of the volume. A value of 0, or leaving the label unset, means no volume
-    will be used, and the data will go to the instance's local storage.
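As a usage sketch (the template name and most flags here are placeholders,
and flag spellings changed across client releases - see the keypair
deprecation note above; only the label is the point)::

    magnum cluster-template-create --name k8s-etcd-vol \
        --coe kubernetes \
        --image fedora-atomic-latest \
        --external-network public \
        --keypair testkey \
        --labels etcd_volume_size=5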
diff --git a/releasenotes/notes/change-bay-to-cluster-in-config-1f2b95d1176d7231.yaml b/releasenotes/notes/change-bay-to-cluster-in-config-1f2b95d1176d7231.yaml
deleted file mode 100644
index 891073fe..00000000
--- a/releasenotes/notes/change-bay-to-cluster-in-config-1f2b95d1176d7231.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-prelude: >
-    Magnum's bay-to-cluster blueprint [1] required
-    changes across much of its codebase to align with
-    industry standards.
-
-    To support this blueprint, certain group and option
-    names were changed in configuration files [2].
-
-    See the deprecations section for more details.
-
-    [1] https://review.openstack.org/#/q/topic:bp/rename-bay-to-cluster
-    [2] https://review.openstack.org/#/c/362660/
-
-deprecations:
-  - The 'bay' group has been renamed to 'cluster' and all
-    options in the former 'bay' group have been moved to
-    'cluster'.
-  - The 'bay_heat' group has been renamed to 'cluster_heat'
-    and all options in the former 'bay_heat' group have been
-    moved to 'cluster_heat'.
-  - The 'bay_create_timeout' option in the former 'bay_heat'
-    group has been renamed to 'create_timeout' inside the
-    'cluster_heat' group.
-  - The 'baymodel' group has been renamed to 'cluster_template'
-    and all options in the former 'baymodel' group have been
-    moved to 'cluster_template'.
diff --git a/releasenotes/notes/change-service-name-ce5c72642fe1d3d1.yaml b/releasenotes/notes/change-service-name-ce5c72642fe1d3d1.yaml
deleted file mode 100644
index 1c6de079..00000000
--- a/releasenotes/notes/change-service-name-ce5c72642fe1d3d1.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-upgrade:
-  - Magnum's default service type changed from "container" to
    "container-infra". It is recommended to update the service
-    type in the Keystone service catalog accordingly.
diff --git a/releasenotes/notes/docker-volume-type-46044734f5a27661.yaml b/releasenotes/notes/docker-volume-type-46044734f5a27661.yaml
deleted file mode 100644
index ce18082b..00000000
--- a/releasenotes/notes/docker-volume-type-46044734f5a27661.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-features:
-  - |
-    Support different volume types for the drivers that support docker
-    storage in cinder volumes. swarm_fedora_atomic and k8s_fedora_atomic
-    accept a new label to specify a docker_volume_type.
-upgrade:
-  - |
-    A new section named cinder is created in magnum.conf. In this cinder
-    section, you need to set a value for the key default_docker_volume_type,
-    which should be a valid type for cinder volumes in your cinder
-    deployment. This default value will be used if no volume_type is
-    provided by the user when using a cinder volume for container storage.
-    The suggested default value is the one set in cinder.conf of your
-    cinder deployment.
diff --git a/releasenotes/notes/fix-global-stack-list-7a3a66169f5c4aa8.yaml b/releasenotes/notes/fix-global-stack-list-7a3a66169f5c4aa8.yaml
deleted file mode 100644
index 2892954d..00000000
--- a/releasenotes/notes/fix-global-stack-list-7a3a66169f5c4aa8.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-security:
-  - Fix the global stack list in the periodic task. Previously, magnum's
-    periodic task performed a `stack-list` operation across all tenants.
-    This is disabled by Heat by default since it causes a security issue.
-    With this release, magnum performs a `stack-get` operation on each
-    Heat stack by default.
-    This might not be scalable, and operators have the option to fall back
-    to `stack-list` by setting the config `periodic_global_stack_list` to
-    `True` (`False` by default) and updating the heat policy file (usually
-    /etc/heat/policy.json) to allow magnum to list stacks.
diff --git a/releasenotes/notes/integrate-osprofiler-79bdf2d0cd8a39fb.yaml b/releasenotes/notes/integrate-osprofiler-79bdf2d0cd8a39fb.yaml
deleted file mode 100644
index 0d789565..00000000
--- a/releasenotes/notes/integrate-osprofiler-79bdf2d0cd8a39fb.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Magnum now supports OSProfiler for HTTP, RPC and DB request tracing.
-    Users can enable OSProfiler via the Magnum configuration file, in the
-    'profiler' section.
diff --git a/releasenotes/notes/keystone_trustee_interface-6d63b74616dda1d4.yaml b/releasenotes/notes/keystone_trustee_interface-6d63b74616dda1d4.yaml
deleted file mode 100644
index 65db0ca3..00000000
--- a/releasenotes/notes/keystone_trustee_interface-6d63b74616dda1d4.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - The Keystone URL used by Cluster Template instances to authenticate is
-    now configurable with the ``trustee_keystone_interface`` parameter,
-    which defaults to ``public``.
diff --git a/releasenotes/notes/no-cinder-volume-87b9339e066c30a0.yaml b/releasenotes/notes/no-cinder-volume-87b9339e066c30a0.yaml
deleted file mode 100644
index 06300fda..00000000
--- a/releasenotes/notes/no-cinder-volume-87b9339e066c30a0.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-prelude: >
-    Currently, the swarm and kubernetes drivers use
-    a dedicated cinder volume to store the container
-    images. It has been observed that one cinder volume
-    per node is a bottleneck for large clusters.
-fixes:
-  - Make the dedicated cinder volume per node an opt-in
-    option. By default, no cinder volumes will be created
-    unless the user passes the docker-volume-size argument.
diff --git a/releasenotes/notes/quota-api-182cd1bc9e706b17.yaml b/releasenotes/notes/quota-api-182cd1bc9e706b17.yaml
deleted file mode 100644
index 87e4c649..00000000
--- a/releasenotes/notes/quota-api-182cd1bc9e706b17.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - This release introduces a 'quota' endpoint that enables admin
-    users to set, update and show the quota for a given tenant.
-    A non-admin user can get their own quota limits.
diff --git a/releasenotes/notes/remove-container-endpoint-3494eb8bd2406e87.yaml b/releasenotes/notes/remove-container-endpoint-3494eb8bd2406e87.yaml
deleted file mode 100644
index fa8d5ef3..00000000
--- a/releasenotes/notes/remove-container-endpoint-3494eb8bd2406e87.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-prelude: |
-    Magnum's service type and mission statement were changed [1].
-
-    The service type changed from "Container service" to "Container
-    Infrastructure Management service". In addition, the mission statement
-    changed to "To provide a set of services for provisioning, scaling, and
-    managing container orchestration engines."
-
-    The intent is to narrow the scope of the Magnum project to focus on
-    integrating container orchestration engines (COEs) with OpenStack.
-    API features intended to uniformly create, manage, and delete
-    individual containers across any COE will be removed from Magnum's
-    API, and will be re-introduced as a separate project called Zun.
-
-    [1] https://review.openstack.org/#/c/311476/
-upgrade:
-  - All container/pod/service/replication controller operations were
-    removed. Users are recommended to use the COE's native tools (e.g.
-    docker, kubectl) to do the equivalent of the removed operations.
diff --git a/releasenotes/notes/rollback-bay-on-update-failure-83e5ff8a7904d5c4.yaml b/releasenotes/notes/rollback-bay-on-update-failure-83e5ff8a7904d5c4.yaml
deleted file mode 100644
index 3a7f0388..00000000
--- a/releasenotes/notes/rollback-bay-on-update-failure-83e5ff8a7904d5c4.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - Add Microversion 1.3 to support Magnum bay rollback.
-    Users can enable rollback on bay update failure by
-    setting 'OpenStack-API-Version' to 'container-infra 1.3'
-    in the request header and passing 'rollback=True'
-    in the bay update request.
diff --git a/releasenotes/notes/rotate-cluster-cert-9f84deb0adf9afb1.yaml b/releasenotes/notes/rotate-cluster-cert-9f84deb0adf9afb1.yaml
deleted file mode 100644
index e49f18a1..00000000
--- a/releasenotes/notes/rotate-cluster-cert-9f84deb0adf9afb1.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Add microversion 1.5 to support rotation of a cluster's CA
-    certificate. This gives admins a way to restrict/deny access to
-    an existing cluster once a user has been granted access.
diff --git a/releasenotes/notes/stats-api-68bc66147ac027e6.yaml b/releasenotes/notes/stats-api-68bc66147ac027e6.yaml
deleted file mode 100644
index d2e25909..00000000
--- a/releasenotes/notes/stats-api-68bc66147ac027e6.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - This release introduces a 'stats' endpoint that provides the
    total number of clusters and the total number of nodes
-    for a given tenant, as well as overall stats across all
-    tenants.
diff --git a/releasenotes/notes/swarm-integration-with-cinder-e3068138a3f75dbe.yaml b/releasenotes/notes/swarm-integration-with-cinder-e3068138a3f75dbe.yaml
deleted file mode 100644
index 0d53e042..00000000
--- a/releasenotes/notes/swarm-integration-with-cinder-e3068138a3f75dbe.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-features:
-  - Integrate the Docker Swarm Fedora Atomic driver with the
-    Block Storage service (cinder). The rexray volume
-    driver was added, based on rexray v0.4. Users can
-    create and attach volumes using docker's native
-    client, and they will authenticate using the per-cluster
-    trustee user. Rexray can either be added to
-    the Fedora Atomic image or run
-    in a container.
diff --git a/releasenotes/notes/update-swarm-73d4340a881bff2f.yaml b/releasenotes/notes/update-swarm-73d4340a881bff2f.yaml
deleted file mode 100644
index bec4ba4f..00000000
--- a/releasenotes/notes/update-swarm-73d4340a881bff2f.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Update the Swarm default version to 1.2.5.
-    It should be the last version, since Docker
-    is now working on the new Swarm mode integrated into Docker.
diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder
deleted file mode 100644
index e69de29b..00000000
diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder
deleted file mode 100644
index e69de29b..00000000
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
deleted file mode 100644
index 72f7907d..00000000
--- a/releasenotes/source/conf.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# -*- coding: utf-8 -*-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Magnum Release Notes documentation build configuration file, created by -# sphinx-quickstart on Tue Mar 29 10:17:02 2016. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# openstackdocstheme options -repository_name = 'openstack/magnum' -bug_project = 'magnum' -bug_tag = '' -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Magnum Release Notes' -copyright = u'2016, Magnum developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -from magnum.version import version_info as magnum_version -version = magnum_version.canonical_version_string() -# The full version, including alpha/beta/rc tags. -release = magnum_version.version_string_with_vcs() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. 
-# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'MagnumReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). 
- # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'MagnumReleaseNotes.tex', u'Magnum Release Notes Documentation', - u'2016, Magnum developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'magnumreleasenotes', u'Magnum Release Notes Documentation', - [u'2016, Magnum developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'MagnumReleaseNotes', u'Magnum Release Notes Documentation', - u'2016, Magnum developers', 'MagnumReleaseNotes', - 'One line description of project.', 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 71eb369f..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. Magnum Release Notes documentation master file, created by - sphinx-quickstart on Tue Mar 29 10:17:02 2016. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to Magnum Release Notes's documentation! -================================================ - -Contents: - -.. toctree:: - :maxdepth: 2 - - unreleased - ocata - newton - mitaka - liberty - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst deleted file mode 100644 index 5d7e7f83..00000000 --- a/releasenotes/source/liberty.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================= - Liberty Series Release Notes -============================= - -.. 
release-notes::
-   :branch: origin/stable/liberty
diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst
deleted file mode 100644
index 97ab8d1d..00000000
--- a/releasenotes/source/mitaka.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-============================
- Mitaka Series Release Notes
-============================
-
-.. release-notes::
-   :branch: origin/stable/mitaka
diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst
deleted file mode 100644
index 97036ed2..00000000
--- a/releasenotes/source/newton.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-===================================
- Newton Series Release Notes
-===================================
-
-.. release-notes::
-   :branch: origin/stable/newton
diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst
deleted file mode 100644
index ebe62f42..00000000
--- a/releasenotes/source/ocata.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-===================================
- Ocata Series Release Notes
-===================================
-
-.. release-notes::
-   :branch: origin/stable/ocata
diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst
deleted file mode 100644
index 875030f9..00000000
--- a/releasenotes/source/unreleased.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-============================
-Current Series Release Notes
-============================
-
-.. release-notes::
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 3a8eb220..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-
-# Despite above warning added by global sync process, please use
-# asciibetical order.
- -Babel!=2.4.0,>=2.3.4 # BSD -PyYAML>=3.10.0 # MIT -SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT -WSME>=0.8 # MIT -WebOb>=1.7.1 # MIT -alembic>=0.8.10 # MIT -cliff>=2.8.0 # Apache-2.0 -decorator>=3.4.0 # BSD -docker>=2.0.0 # Apache-2.0 -enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD -eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT -iso8601>=0.1.11 # MIT -jsonpatch>=1.1 # BSD -keystoneauth1>=3.0.1 # Apache-2.0 -keystonemiddleware>=4.12.0 # Apache-2.0 -kubernetes>=1.0.0b1 # Apache-2.0 -marathon>=0.8.6 # MIT -netaddr!=0.7.16,>=0.7.13 # BSD -oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0 -oslo.context>=2.14.0 # Apache-2.0 -oslo.db>=4.24.0 # Apache-2.0 -oslo.i18n!=3.15.2,>=2.1.0 # Apache-2.0 -oslo.log>=3.22.0 # Apache-2.0 -oslo.messaging!=5.25.0,>=5.24.2 # Apache-2.0 -oslo.middleware>=3.27.0 # Apache-2.0 -oslo.policy>=1.23.0 # Apache-2.0 -oslo.serialization!=2.19.1,>=1.10.0 # Apache-2.0 -oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.20.0 # Apache-2.0 -oslo.versionedobjects>=1.17.0 # Apache-2.0 -oslo.reports>=0.6.0 # Apache-2.0 -pbr!=2.1.0,>=2.0.0 # Apache-2.0 -pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD -pycadf!=2.0.0,>=1.1.0 # Apache-2.0 -python-barbicanclient>=4.0.0 # Apache-2.0 -python-glanceclient>=2.7.0 # Apache-2.0 -python-heatclient>=1.6.1 # Apache-2.0 -python-neutronclient>=6.3.0 # Apache-2.0 -python-novaclient>=9.0.0 # Apache-2.0 -python-keystoneclient>=3.8.0 # Apache-2.0 -requests>=2.14.2 # Apache-2.0 -setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=16.0 # PSF/ZPL -six>=1.9.0 # MIT -stevedore>=1.20.0 # Apache-2.0 -taskflow>=2.7.0 # Apache-2.0 -cryptography>=1.6 # BSD/Apache-2.0 -Werkzeug>=0.7 # BSD License diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 99c3b57e..00000000 --- a/setup.cfg +++ /dev/null @@ -1,85 +0,0 @@ -[metadata] -name = magnum -summary = Container Management project for OpenStack -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://docs.openstack.org/magnum/latest/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - -[files] -packages = - magnum - -[build_sphinx] -source-dir = doc/source -build-dir = doc/build -all_files = 1 -warning-is-error = 1 - -[upload_sphinx] -upload-dir = doc/build/html - -[compile_catalog] -directory = magnum/locale -domain = magnum - -[update_catalog] -domain = magnum -output_dir = magnum/locale -input_file = magnum/locale/magnum.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = magnum/locale/magnum.pot - -[entry_points] -console_scripts = - magnum-api = magnum.cmd.api:main - magnum-conductor = magnum.cmd.conductor:main - magnum-db-manage = magnum.cmd.db_manage:main - magnum-driver-manage = magnum.cmd.driver_manage:main - -oslo.config.opts = - magnum = magnum.opts:list_opts - magnum.conf = magnum.conf.opts:list_opts - -oslo.config.opts.defaults = - magnum = magnum.common.config:set_cors_middleware_defaults - -magnum.drivers = - k8s_fedora_atomic_v1 = 
magnum.drivers.k8s_fedora_atomic_v1.driver:Driver
-    k8s_coreos_v1 = magnum.drivers.k8s_coreos_v1.driver:Driver
-    swarm_fedora_atomic_v1 = magnum.drivers.swarm_fedora_atomic_v1.driver:Driver
-    swarm_fedora_atomic_v2 = magnum.drivers.swarm_fedora_atomic_v2.driver:Driver
-    mesos_ubuntu_v1 = magnum.drivers.mesos_ubuntu_v1.driver:Driver
-    k8s_fedora_ironic_v1 = magnum.drivers.k8s_fedora_ironic_v1.driver:Driver
-
-magnum.database.migration_backend =
-    sqlalchemy = magnum.db.sqlalchemy.migration
-
-magnum.cert_manager.backend =
-    barbican = magnum.common.cert_manager.barbican_cert_manager
-    local = magnum.common.cert_manager.local_cert_manager
-    x509keypair = magnum.common.cert_manager.x509keypair_cert_manager
-
-tempest.test_plugins =
-    magnum_tests = magnum.tests.functional.tempest_tests.plugin:MagnumTempestPlugin
-[wheel]
-universal = 1
-
-[extras]
-osprofiler =
-    osprofiler>=1.4.0 # Apache-2.0
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 566d8443..00000000
--- a/setup.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-import setuptools
-
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
-    import multiprocessing  # noqa
-except ImportError:
-    pass
-
-setuptools.setup(
-    setup_requires=['pbr>=2.0.0'],
-    pbr=True)
diff --git a/specs/async-container-operation.rst b/specs/async-container-operation.rst
deleted file mode 100644
index fa8ba737..00000000
--- a/specs/async-container-operation.rst
+++ /dev/null
@@ -1,452 +0,0 @@
-=================================
-Asynchronous Container Operations
-=================================
-
-Launchpad blueprint:
-
-https://blueprints.launchpad.net/magnum/+spec/async-container-operations
-
-At present, container operations are done in a synchronous way, end-to-end.
-This model does not scale well and forces the client to block until the
-operation completes.
-
-Problem Description
--------------------
-
-At present, Magnum-Conductor executes the container operation as part of
-processing the request forwarded from Magnum-API. For container-create, if
-the image needs to be pulled down, it may take a while depending on the
-responsiveness of the registry, which can be a substantial delay. At the
-same time, experiments suggest that even for a pre-pulled image, the time
-taken by each operation, namely create/start/delete, is of the same order,
-as each involves a complete round trip between the magnum-client and the
-COE-API, via Magnum-API and Magnum-Conductor[1].
-
-Use Cases
----------
-
-For wider enterprise adoption of Magnum, we need it to scale better. For
-that, we need to replace some of these synchronous behaviors with suitable
-asynchronous alternatives.
-
-To understand the use-case better, we can have a look at the average
-time spent during container operations, as noted in [1].
-
-Proposed Changes
-----------------
-
-The design has been discussed over the ML[6]. The conclusions have been kept
-on the 'whiteboard' of the Blueprint.
-
-The amount of code change is expected to be significant. To ease the
-process of adoption, code review and functional testing, a phased
-implementation may be required. We can define the scope of the three phases
-of the implementation as follows -
-
-* Phase-0 will bring in the basic feature of asynchronous mode of operation
-  in Magnum - (A) from API to Conductor and (B) from Conductor to COE-API.
-  During phase-0, this mode will be optional through configuration.
-
-  Both the communications of (A) and (B) are proposed to be made
-  asynchronous to get the full benefit. If we do (A) alone, it does not gain
-  us much, as (B) takes up the larger share of the operation. If we do (B)
-  alone, it does not make sense, as (A) will synchronously wait for no
-  meaningful data.
-
-* Phase-1 will concentrate on making the feature persistent, to address
-  various scenarios of conductor restart, worker failure etc. We will
-  support this feature for multiple Conductor-workers in this phase.
-
-* Phase-2 will select asynchronous mode of operation as the default mode.
-  At the same time, we can evaluate dropping the code for synchronous mode,
-  too.
-
-
-Phase-0 is required as a meaningful temporary step, to establish the
-importance and tangible benefits of phase-1. It also serves as a
-proof-of-concept at a lower cost of code changes, with a configurable
-option. This will enable developers and operators to get a taste of the
-feature before bringing in the heavier dependencies and changes proposed in
-phase-1.
-
-A reference implementation for the phase-0 items has been put up for
-review[2].
-
-Following is a summary of the design -
-
-1. Configurable mode of operation - async
------------------------------------------
-
-For ease of adoption, the async mode of communication between API-conductor
-and conductor-COE in magnum can be controlled using a configuration option,
-so the code paths for sync mode and async mode will co-exist for now. To
-achieve this with minimal/no code duplication and a cleaner interface, we
-are using openstack/futurist[4]. The futurist interface hides the details of
-the type of executor being used. In the async configuration, a greenthread
-pool of the configured pool size gets created. Here is a sample of how the
-config would look like: ::
-
-    [DEFAULT]
-    async_enable = False
-
-    [conductor]
-    async_threadpool_max_workers = 64
-
-The futurist library is used in oslo.messaging and thus, in effect, by
-almost all OpenStack projects. Futurist is very useful for running the same
-code under different execution models, and hence saves potential duplication
-of code.
-
-
-2. Type of operations
----------------------
-
-There are two classes of container operations - one that can be made async,
-namely create/delete/start/stop/pause/unpause/reboot, which do not need data
-about the container in return. The other type requires data, namely
-container-logs. For async-type container operations, magnum-API will use
-'cast' instead of 'call' from oslo_messaging[5].
-
-'cast' from oslo.messaging.RPCClient is used to invoke a method and return
-immediately, whereas 'call' invokes a method and waits for a reply. While
-operating in asynchronous mode, it is intuitive to use the cast method, as
-the result of the response may not be available immediately.
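As a rough illustration of the two invocation styles (a sketch only - the
transport setup is simplified, and the topic and method names
'magnum-conductor', 'container_show' and 'container_create' are stand-ins
rather than Magnum's actual RPC API)::

    import oslo_messaging as messaging
    from oslo_config import cfg

    transport = messaging.get_transport(cfg.CONF)
    target = messaging.Target(topic='magnum-conductor')
    client = messaging.RPCClient(transport, target)
    ctxt = {}  # normally the request context

    # 'call' blocks until the conductor returns a result (sync path).
    details = client.call(ctxt, 'container_show', uuid='fake-uuid')

    # 'cast' hands the message to the transport and returns at once
    # (async path); no result ever comes back to the caller.
    client.cast(ctxt, 'container_create', uuid='fake-uuid')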
-
-Magnum-api first fetches the details of a container by doing
-'get_rpc_resource'. This function uses magnum objects. Hence, this function
-uses a 'call' method underneath. Once magnum-api gets back the details,
-it issues the container operation next, using another 'call' method.
-The proposal above is to replace the second 'call' with 'cast'.
-
-If a user issues a container operation when there is no listening
-conductor (because of process failure), there will be an RPC timeout at the
-first 'call' method. In this case, the user will observe the request to
-block at the client and finally fail with an HTTP 500 ERROR after the RPC
-timeout, which is 60 seconds by default. This behavior is independent of the
-usage of 'cast' or 'call' for the second message mentioned above. This
-behavior does not influence our design, but it is documented here for
-clarity of understanding.
-
-
-3. Ensuring the order of execution - Phase-0
---------------------------------------------
-
-Magnum-conductor needs to ensure that for a given bay and a given container,
-the operations are executed in sequence. In phase-0, we want to demonstrate
-how asynchronous behavior helps scaling. Asynchronous mode of container
-operations will be supported for the single magnum-conductor scenario in
-phase-0. If magnum-conductor crashes, there will be no recovery for the
-operations accepted earlier - which means no persistence in phase-0 for
-operations accepted by magnum-conductor. The multiple-conductor scenario and
-persistence will be addressed in phase-1 [please refer to the next section
-for further details]. If the COE crashes or does not respond, the error will
-be detected, as it is in sync mode, and reflected in the container-status.
-
-Magnum-conductor will maintain a job-queue. The job-queue is indexed by
-bay-id and container-id. A job-queue entry contains the sequence of
-operations requested for a given bay-id and container-id, in temporal order.
-A greenthread will execute the tasks/operations in order for a given
-job-queue entry, until the queue empties. Using a greenthread in this
-fashion saves us the cost and complexity of locking, while preserving
-functional correctness. When a request for a new operation comes in, it gets
-appended to the corresponding queue entry.
-
-For a sequence of container operations, if an intermediate operation fails,
-we will stop continuing the sequence. The community feels more confident
-starting with this strictly defensive policy[17]. The failure will be logged
-and saved into the container-object, which will help an operator be better
-informed about the result of the sequence of container operations. We may
-revisit this policy later, if we find it too restrictive.
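A minimal sketch of this in-memory structure (illustrative only; the helper
names `submit` and `_drain` are invented here, and production code would
need more careful bookkeeping around queue cleanup)::

    import collections

    import futurist

    executor = futurist.GreenThreadPoolExecutor(max_workers=64)
    job_queues = collections.defaultdict(collections.deque)
    draining = set()  # (bay_id, container_id) keys with an active greenthread

    def _drain(key):
        # Run the queued operations for one container, strictly in order.
        queue = job_queues[key]
        while queue:
            op = queue.popleft()
            try:
                op()  # a bound conductor method: create/start/delete/...
            except Exception:
                queue.clear()  # defensive policy: abort the whole sequence
                break
        draining.discard(key)

    def submit(bay_id, container_id, op):
        # Append the operation; start a drainer if none is active for the key.
        key = (bay_id, container_id)
        job_queues[key].append(op)
        if key not in draining:
            draining.add(key)
            executor.submit(_drain, key)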
-
-4. Ensuring the order of execution - phase-1
---------------------------------------------
-
-The goal is to execute requests for a given bay and a given container in
-sequence. In phase-1, we want to address persistence and the capability of
-supporting multiple magnum-conductor processes. To achieve this, we will
-reuse the concepts laid out in phase-0 and use a standard library.
-
-We propose to use taskflow [7] for this implementation. Magnum-conductors
-will consume the AMQP message and post a task [8] on a taskflow
-jobboard [9]. Greenthreads from magnum-conductors will subscribe to the
-taskflow jobboard as taskflow-conductors [10]. The taskflow jobboard is
-maintained with a choice of persistent backends [11]. This will help
-address the concern of persistence for accepted operations when a
-conductor crashes. Taskflow will ensure that the tasks (container
-operations) in a job (a sequence of operations for a given bay and
-container) execute in sequence. We can easily notice that some of the
-concepts used in phase-0 are reused as-is. For example, the job-queue maps
-to the jobboard here, and the use of greenthreads maps to taskflow's
-conductor concept. Hence, we expect an easier migration from phase-0 to
-phase-1 with the choice of taskflow.
-
-For the taskflow jobboard [11], the available backend choices are
-ZooKeeper and Redis. However, we plan to use MySQL as the default backend
-for the magnum conductor jobboard use-case. This support will be added to
-taskflow. Later, we may choose to support the flexibility of other
-backends like ZooKeeper/Redis via configuration, but phase-1 will keep the
-implementation simple with the MySQL backend and revisit this if required.
-
-Let us consider the scenarios of a conductor crashing -
-
- - If a task is added to the jobboard and the conductor crashes after
-   that, taskflow can assign the particular job to any available
-   greenthread agent from other conductor instances. If the system was
-   running with a single magnum-conductor, it will wait for the conductor
-   to come back and join.
- - If a task is picked up and magnum-conductor crashes, the task is not
-   complete from the jobboard's point of view. As taskflow detects the
-   conductor going away, it assigns the job to another available
-   conductor.
- - When a conductor picks up a message from AMQP, it will acknowledge AMQP
-   only after persisting the message to the jobboard. This will prevent
-   losing the message if the conductor crashes after picking it up from
-   AMQP. Explicit acknowledgement from the application may use
-   NotificationResult.HANDLED [12] to AMQP. We may use the
-   at-least-once-guarantee [13] feature in oslo.messaging [14], as it
-   becomes available.
-
-To summarize some of the important outcomes of this proposal -
-
- - A taskflow job represents the sequence of container operations on a
-   given bay and a given container. At a given point in time, the sequence
-   may contain a single operation or multiple operations.
- - There will be a single jobboard for all conductors.
- - Taskflow-conductors are multiple greenthreads from a given
-   magnum-conductor.
- - The taskflow-conductors will run in 'blocking' mode [15], as those
-   greenthreads have no other job than claiming and executing the jobs
-   from the jobboard.
- - Individual jobs are supposed to maintain a temporal sequence, so the
-   taskflow engine would be 'serial' [16].
- - The proposed model for a 'job' is a temporal sequence of 'tasks' -
-   operations on a given bay and a given container. Hence, it is expected
-   that while a given operation, say container-create, is in progress, a
-   request for container-start may come in. Adding the task to the
-   existing job is the intuitive way to maintain the sequence of
-   operations.
-
-To fit taskflow exactly into our use-case, we may need to make two
-enhancements in taskflow -
-
-- Supporting a MySQL plugin as a DB backend for the jobboard. Support for
-  Redis exists, so it will be similar. We do not see any technical
-  roadblock for adding MySQL support for the taskflow jobboard. If the
-  proposal does not get approved by the taskflow team, we may have to use
-  Redis as an alternative option.
-- Support for dynamically adding tasks to a job on the jobboard. This also
-  looks feasible, as discussed over #openstack-state-management
-  [unfortunately, this channel is not logged, but if we agree in this
-  direction, we can initiate a discussion over the ML, too]. If the
-  taskflow team does not allow adding this feature, even though they have
-  agreed now, we will use the dependency feature in taskflow. We will
-  explore and elaborate this further if required.
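-
-As a rough, hedged illustration of the phase-1 flow (using the ZooKeeper
-backend that taskflow supports today, since the MySQL backend is the
-enhancement proposed above; the hosts, path and job payload are
-illustrative), posting a job could look like: ::
-
-    import contextlib
-
-    from taskflow.jobs import backends as boards
-
-    jb_conf = {
-        'board': 'zookeeper',
-        'hosts': ['127.0.0.1:2181'],
-        'path': '/taskflow/magnum-conductor',
-    }
-
-    with contextlib.closing(boards.fetch('magnum-conductor',
-                                         jb_conf)) as board:
-        board.connect()
-        # One job per (bay, container) pair; the details carry the
-        # ordered sequence of operations for that pair.
-        board.post('bay-1:container-9', book=None,
-                   details={'operations': ['container_create',
-                                           'container_start']})
-
-On the consuming side, the greenthreads described above would claim and
-run such jobs as taskflow conductors in 'blocking' mode.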
-
-5. Status of progress
----------------------
-
-The progress of execution of a container operation is reflected in the
-status of the container, as 'create-in-progress', 'delete-in-progress'
-etc.
-
-Alternatives
-------------
-
-Without an asynchronous implementation, Magnum will suffer from complaints
-about poor scalability and slowness.
-
-In this design, stack-lock [3] has been considered as an alternative to
-taskflow. Following are the reasons for preferring taskflow over
-stack-lock, as of now -
-
-- Stack-lock, as used in Heat, is not a library, so it would require
-  making a copy for Magnum, which is not desirable.
-- Taskflow is a relatively mature, well-supported, feature-rich library.
-- Taskflow has built-in capacity to scale out [or in], as multiple
-  conductors can join [or leave] the cluster.
-- Taskflow has a failure detection and recovery mechanism. If a process
-  crashes, worker threads from other conductors may continue the
-  execution.
-
-In this design, we describe futurist [4] as the choice of implementation.
-The choice was made to prevent duplication of code for the async and sync
-modes. For this purpose, we could not find any other solution to compare
-against.
-
-Data model impact
------------------
-
-Phase-0 has no data model impact, but phase-1 may introduce an additional
-table into the Magnum database. As per the present proposal for using
-taskflow in phase-1, we have to introduce a new table for the jobboard
-under the magnum db. This table will be exposed to the taskflow library as
-a persistent db plugin. Alternatively, an implementation with stack-lock
-would also require the introduction of a new table for stack-lock objects.
-
-REST API impact
----------------
-
-None.
-
-Security impact
----------------
-
-None.
-
-Notifications impact
---------------------
-
-None.
-
-Other end user impact
----------------------
-
-None.
-
-Performance impact
-------------------
-
-Asynchronous mode of operation helps scalability. Hence, it improves
-responsiveness and reduces turnaround time by a significant proportion. A
-small test on devstack, comparing both modes, demonstrates this with
-numbers [1].
-
-Other deployer impact
----------------------
-
-None.
-
-Developer impact
-----------------
-
-None.
-
-Implementation
---------------
-
-Assignee(s)
------------
-
-Primary assignee - suro-patz (Surojit Pathak)
-
-Work Items
-----------
-
-For phase-0 -
-
-* Introduce a config knob for the asynchronous mode of container
-  operations.
-
-* Changes for magnum-api to use CAST instead of CALL for operations
-  eligible for asynchronous mode.
-
-* Implement the in-memory job-queue in the Magnum conductor, and integrate
-  the futurist library.
-
-* Unit tests and functional tests for async mode.
-
-* Documentation changes.
-
-For phase-1 -
-
-* Get the dependencies on taskflow resolved.
-
-* Introduce the jobboard table into the Magnum DB.
-
-* Integrate taskflow in the Magnum conductor to replace the in-memory
-  job-queue with the taskflow jobboard. Also, we need conductor
-  greenthreads to subscribe as workers to the taskflow jobboard.
-
-* Add unit tests and functional tests for persistence and the
-  multiple-conductor scenario.
-
-* Documentation changes.
-
-For phase-2 -
-
-* We will promote the asynchronous mode of operation as the default mode.
-
-* We may decide to drop the code for the synchronous mode and the
-  corresponding config.
-
-* Documentation changes.
-
-
-Dependencies
-------------
-
-For phase-1, if we choose to implement using taskflow, we need to get the
-following two features added to taskflow first -
-
-* Ability to add a new task to an existing job on the jobboard.
-* MySQL plugin support as a persistent DB.
-
-Testing
--------
-
-All the existing test cases will be run to ensure that async mode does not
-break them. Additionally, more functional tests and unit tests specific to
-async mode will be added.
-
-Documentation Impact
---------------------
-
-Magnum documentation will include a description of the option for the
-asynchronous mode of container operations and its benefits. We will also
-add developer documentation with guidelines for implementing a container
-operation in both modes - sync and async. We will add a section on 'how to
-debug container operations in async mode'. The phase-0 and phase-1
-implementations and their support for single or multiple conductors will
-be clearly documented for the operators.
-
-References
-----------
-
-[1] - Execution time comparison between sync and async modes:
-
-https://gist.github.com/surojit-pathak/2cbdad5b8bf5b569e755
-
-[2] - Proposed change under review:
-
-https://review.openstack.org/#/c/267134/
-
-[3] - Heat's use of stacklock:
-
-http://docs.openstack.org/developer/heat/_modules/heat/engine/stack_lock.html
-
-[4] - openstack/futurist:
-
-http://docs.openstack.org/developer/futurist/
-
-[5] - openstack/oslo.messaging:
-
-http://docs.openstack.org/developer/oslo.messaging/rpcclient.html
-
-[6] - ML discussion on the design:
-
-http://lists.openstack.org/pipermail/openstack-dev/2015-December/082524.html
-
-[7] - Taskflow library:
-
-http://docs.openstack.org/developer/taskflow/
-
-[8] - task in taskflow:
-
-http://docs.openstack.org/developer/taskflow/atoms.html#task
-
-[9] - job and jobboard in taskflow:
-
-http://docs.openstack.org/developer/taskflow/jobs.html
-
-[10] - conductor in taskflow:
-
-http://docs.openstack.org/developer/taskflow/conductors.html
-
-[11] - persistent backend support in taskflow:
-
-http://docs.openstack.org/developer/taskflow/persistence.html
-
-[12] - oslo.messaging notification handler:
-
-http://docs.openstack.org/developer/oslo.messaging/notification_listener.html
-
-[13] - Blueprint for at-least-once-guarantee, oslo.messaging:
-
-https://blueprints.launchpad.net/oslo.messaging/+spec/at-least-once-guarantee
-
-[14] - Patchset under review for at-least-once-guarantee, oslo.messaging:
-
-https://review.openstack.org/#/c/229186/
-
-[15] - Taskflow blocking mode for conductor:
-
-http://docs.openstack.org/developer/taskflow/conductors.html#taskflow.conductors.backends.impl_executor.ExecutorConductor
-
-[16] - Taskflow serial engine:
-
-http://docs.openstack.org/developer/taskflow/engines.html
-
-[17] - Community feedback on policy to handle failure within a sequence:
-
-http://eavesdrop.openstack.org/irclogs/%23openstack-containers/%23openstack-containers.2016-03-08.log.html#t2016-03-08T20:41:17
diff --git a/specs/bay-drivers.rst b/specs/bay-drivers.rst
deleted file mode 100644
index aac18d52..00000000
--- a/specs/bay-drivers.rst
+++ /dev/null
@@ -1,344 +0,0 @@
-..
-   This work is licensed under a Creative Commons Attribution 3.0 Unported
-   License.
-
-   http://creativecommons.org/licenses/by/3.0/legalcode
-
-======================================
-Container Orchestration Engine drivers
-======================================
-
-Launchpad blueprint:
-
-https://blueprints.launchpad.net/magnum/+spec/bay-drivers
-
-Container Orchestration Engines (COEs) are different systems for managing
-containerized applications in a clustered environment, each with its own
-conventions and ecosystem. Three of the most common, which also happen to
-be supported in Magnum, are: Docker Swarm, Kubernetes, and Mesos. In order
-to successfully serve developers, Magnum needs to be able to provision and
-manage access to the latest COEs through its API in an effective and
-scalable way.
-
-
-Problem description
-===================
-
-Magnum currently supports the three most popular COEs, but as more emerge
-and existing ones change, it needs an effective and scalable way of
-managing them over time.
-
-One of the problems with the current implementation is that COE-specific
-logic, such as Kubernetes replication controllers and services, is
-situated in the core Magnum library and made available to users through
-the main API. Placing COE-specific logic in a core API introduces tight
-coupling and forces operators to work with an inflexible design.
-
-By formalising a more modular and extensible architecture, Magnum will be
-in a much better position to help operators and consumers satisfy custom
-use-cases.
-
-Use cases
----------
-
-1. Extensibility. Contributors and maintainers need a suitable
-   architecture to house current and future COE implementations. Moving to
-   a more extensible architecture, where core classes delegate to drivers,
-   provides a more effective and elegant model for handling COE
-   differences without the need for tightly coupled and monkey-patched
-   logic.
-
-   One of the key use cases is allowing operators to customise their
-   orchestration logic, such as modifying Heat templates or even using
-   their own tooling like Ansible. Moreover, operators will often expect
-   to use a custom distro image with lots of software pre-installed and
-   many special security requirements, which is extremely difficult or
-   impossible to achieve with the current upstream templates. COE drivers
-   solve these problems.
-
-2. Maintainability. A modular architecture will be easier to manage in the
-   long run because the responsibility of maintaining non-standard
-   implementations is shifted into the operator's domain. Maintaining the
-   default drivers which are packaged with Magnum will also be easier and
-   cleaner since the logic is demarcated from core codebase directories.
-
-3. COE & distro choice. In the community there has been a lot of
-   discussion about which distro and COE combination to support with the
-   templates. Having COE drivers allows people or organizations to
-   maintain distro-specific implementations (e.g. CentOS+Kubernetes).
-
-4. Addresses dependency concerns. One of the direct results of introducing
-   a driver model is the ability to give operators more freedom in
-   choosing how Magnum integrates with the rest of their OpenStack
-   platform. For example, drivers would remove the necessity for users to
-   adopt Barbican for secret management.
-
-5. Driver versioning. The new driver model allows operators to modify
-   existing drivers or create custom ones, release new bay types based on
-   the newer version, and subsequently launch new bays running the updated
-   functionality.
-   Existing bays which are based on older driver versions
-   would be unaffected in this process and would still be able to have
-   lifecycle operations performed on them. If one were to list their
-   details from the API, it would reference the old driver version. An
-   operator can see which driver version a bay type is based on through
-   its ``driver`` value, which is exposed through the API.
-
-Proposed change
-===============
-
-1. The creation of a new directory at the project root:
-   ``./magnum/drivers``. Each driver will house its own logic inside its
-   own directory. Each distro will house its own logic inside that driver
-   directory. For example, the Fedora Atomic distro using Swarm will have
-   the following directory structure:
-
-   ::
-
-      drivers/
-        swarm_atomic_v1/
-          image/
-            ...
-          templates/
-            ...
-          api.py
-          driver.py
-          monitor.py
-          scale.py
-          template_def.py
-          version.py
-
-   The directory name should be a string which uniquely identifies the
-   driver and provides a descriptive reference. The driver version number
-   and name are provided in the manifest file and will be included in the
-   bay metadata at cluster build time.
-
-   There are two workflows for rolling out driver updates:
-
-   - if the change is relatively minor, maintainers modify the files in
-     the existing driver directory and update the version number in the
-     manifest file.
-
-   - if the change is significant, maintainers create a new directory
-     (either from scratch or by forking).
-
-   Further explanation of the top-level files and directories:
-
-   - an ``image`` directory is *optional* and should contain documentation
-     which tells users how to build the image and register it to glance.
-     This directory can also hold artifacts for building the image, for
-     instance diskimagebuilder elements, scripts, etc.
-
-   - a ``templates`` directory is *required* and will (for the foreseeable
-     future) store Heat template YAML files. In the future drivers will
-     allow operators to use their own orchestration tools like Ansible.
-
-   - ``api.py`` is *optional*, and should contain the API controller which
-     handles custom API operations like Kubernetes RCs or Pods. It will be
-     this class which accepts HTTP requests and delegates to the
-     Conductor. It should contain a uniquely named class, such as
-     ``SwarmAtomicXYZ``, which extends from the core controller class. The
-     COE class would have the opportunity of overriding base methods if
-     necessary.
-
-   - ``driver.py`` is *required*, and should contain the logic which maps
-     controller actions to COE interfaces. These include: ``bay_create``,
-     ``bay_update``, ``bay_delete``, ``bay_rebuild``, ``bay_soft_reboot``
-     and ``bay_hard_reboot``.
-
-   - ``version.py`` is *required*, and should contain the version number
-     of the bay driver. This is defined by a ``version`` attribute and is
-     represented in the ``1.0.0`` format. It should also include a
-     ``Driver`` attribute with a descriptive name such as
-     ``swarm_atomic``.
-
-     Due to the varying nature of COEs, it is up to the bay maintainer to
-     implement this in their own way. Since a bay is a combination of a
-     COE and an image, ``driver.py`` will also contain information about
-     the ``os_distro`` property which is expected to be attributed to the
-     Glance image.
-
-   - ``monitor.py`` is *optional*, and should contain the logic which
-     monitors the resource utilization of bays.
-
-   - ``template_def.py`` is *required* and should contain the COE's
-     implementation of how orchestration templates are loaded and matched
-     to Magnum objects. It would probably contain multiple classes, such
-     as ``class SwarmAtomicXYZTemplateDef(BaseTemplateDefinition)``.
-
-   - ``scale.py`` is *optional* per bay specification and should contain
-     the logic for scaling operations.
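-
-   To make the two *required* files above concrete, here is a purely
-   hypothetical skeleton (the class name and method bodies are
-   illustrative assumptions, not part of this spec): ::
-
-      # drivers/swarm_atomic_v1/version.py -- hypothetical sketch of the
-      # contract described above.
-      version = '1.0.0'
-      Driver = 'swarm_atomic'
-
-      # drivers/swarm_atomic_v1/driver.py -- hypothetical skeleton only.
-      class SwarmAtomicDriver(object):
-          """Maps controller actions to Swarm-on-Atomic interfaces."""
-
-          # Glance image property this driver expects on its image.
-          os_distro = 'fedora-atomic'
-
-          def bay_create(self, context, bay):
-              raise NotImplementedError()
-
-          def bay_delete(self, context, bay):
-              raise NotImplementedError()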
-
-2. Renaming the ``coe`` attribute of BayModel to ``driver``. Because this
-   value would determine which driver classes and orchestration templates
-   to load, it would need to correspond to the name of the driver as it is
-   registered with stevedore_ and setuptools entry points.
-
-   During the lifecycle of an API operation, top-level Magnum classes
-   (such as a Bay conductor) would then delegate to the driver classes
-   which have been dynamically loaded. Validation will need to ensure that
-   the value provided by the user is correct.
-
-   By default, drivers are located under the main project directory and
-   their namespaces are accessible via ``magnum.drivers.foo``. But a use
-   case that needs to be looked at and, if possible, provided for is
-   drivers which are situated outside the project directory, for example
-   in ``/usr/share/magnum``. This will suit operators who want greater
-   separation between customised code and Python libraries.
-
-3. The driver implementations for the four current COE and image
-   combinations: Docker Swarm Fedora, Kubernetes Fedora, Kubernetes
-   CoreOS, and Mesos Ubuntu. Any templates would need to be moved from
-   ``magnum/templates/{coe_name}`` to
-   ``magnum/drivers/{driver_name}/templates``.
-
-4. Removal of the following files:
-
-   ::
-
-      magnum/magnum/conductor/handlers/
-        docker_conductor.py
-        k8s_conductor.py
-
-Design Principles
------------------
-
-- Minimal, clean API without a high cognitive burden
-
-- Ensure Magnum's priority is to do one thing well, but allow
-  extensibility by external contributors
-
-- Do not force ineffective abstractions that introduce feature divergence
-
-- Formalise a modular and loosely coupled driver architecture that removes
-  COE logic from the core codebase
-
-
-Alternatives
-------------
-
-This alternative relates to #2 of the Proposed change. Instead of having
-drivers registered using stevedore_ and setuptools entry points, an
-alternative is to use the Magnum config.
-
-
-Data model impact
------------------
-
-Since drivers would be implemented for the existing COEs, there would be
-no loss of functionality for end-users.
-
-
-REST API impact
----------------
-
-Attribute change when creating and updating a BayModel (``coe`` to
-``driver``). This would occur before v1 of the API is frozen.
-
-COE-specific endpoints would be removed from the core API.
-
-
-Security impact
----------------
-
-None
-
-
-Notifications impact
---------------------
-
-None
-
-
-Other end user impact
----------------------
-
-There will be deployer impacts because deployers will need to select
-which drivers they want to activate.
-
-
-Performance Impact
-------------------
-
-None
-
-
-Other deployer impact
----------------------
-
-In order to utilize new functionality and bay drivers, operators will need
-to update their installation and configure bay models to use a driver.
-
-
-Developer impact
-----------------
-
-Due to the significant impact on the current codebase, a phased
-implementation approach will be necessary. This is defined in the Work
-Items section.
-
-Code will be contributed for COE-specific functionality in a new way, and
-will need to abide by the new architecture. Documentation and a good first
-implementation will play an important role in helping developers
-contribute new functionality.
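-
-As a hedged sketch of the stevedore-based loading described in #2 of the
-Proposed change (the entry-point namespace and names here are illustrative
-assumptions, not part of this spec), a driver could be registered in
-``setup.cfg`` and loaded from the BayModel's ``driver`` value: ::
-
-   # setup.cfg (hypothetical):
-   #
-   #   [entry_points]
-   #   magnum.drivers =
-   #       swarm_atomic_v1 = magnum.drivers.swarm_atomic_v1.driver:SwarmAtomicDriver
-
-   from stevedore import driver
-
-   # 'swarm_atomic_v1' stands in for the value stored on the BayModel;
-   # context and bay are stand-ins for the objects Magnum would pass.
-   mgr = driver.DriverManager(namespace='magnum.drivers',
-                              name='swarm_atomic_v1',
-                              invoke_on_load=True)
-   mgr.driver.bay_create(context, bay)  # delegate to the loaded driver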
-
-
-Implementation
-==============
-
-
-Assignee(s)
------------
-
-Primary assignee:
-murali-allada
-
-Other contributors:
-jamiehannaford
-strigazi
-
-
-Work Items
-----------
-
-1. New ``drivers`` directory
-
-2. Change ``coe`` attribute to ``driver``
-
-3. COE drivers implementation (swarm-fedora, k8s-fedora, k8s-coreos,
-   mesos-ubuntu). Templates should remain in the directory tree until
-   their accompanying driver has been implemented.
-
-4. Delete old conductor files
-
-5. Update client
-
-6. Add documentation
-
-7. Improve the user experience for operators forking or creating new
-   drivers. One way we could do this is by creating new client commands or
-   scripts. This is orthogonal to this spec, and will be considered after
-   its core implementation.
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Each commit will be accompanied by unit tests and Tempest functional
-tests.
-
-
-Documentation Impact
-====================
-
-A set of documentation for this architecture will be required. We should
-also provide a developer guide for creating a new bay driver and updating
-existing ones.
-
-
-References
-==========
-
-`Using Stevedore in your Application
-<http://docs.openstack.org/developer/stevedore/>`_.
-
-.. _stevedore: http://docs.openstack.org/developer/stevedore/
diff --git a/specs/container-networking-model.rst b/specs/container-networking-model.rst
deleted file mode 100644
index 70d8f670..00000000
--- a/specs/container-networking-model.rst
+++ /dev/null
@@ -1,458 +0,0 @@
-..
-   This work is licensed under a Creative Commons Attribution 3.0 Unported
-   License.
-
-   http://creativecommons.org/licenses/by/3.0/legalcode
-
-=================================
-Magnum Container Networking Model
-=================================
-
-Launchpad Blueprint:
-
-https://blueprints.launchpad.net/magnum/+spec/extensible-network-model
-
-For Magnum to prosper, the project must support a range of networking
-tools and techniques, while maintaining a simple, developer-focused user
-experience. The first step in achieving this goal is to standardize the
-process of allocating networking to containers, while providing an
-abstraction for supporting various networking capabilities through
-pluggable back-end implementations. This document recommends using
-Docker's libnetwork library to implement container networking abstractions
-and plugins. Since libnetwork is not a standard and the container
-ecosystem is rapidly evolving, the Magnum community should continue
-evaluating container networking options on a frequent basis.
-
-Problem Description
-===================
-
-The container networking ecosystem is undergoing rapid changes. The
-networking tools and techniques used in today's container deployments are
-different than twelve months ago and will continue to evolve. For example,
-Flannel [6]_, Kubernetes' preferred networking implementation, was
-initially released in July of 2014 and was not considered preferred until
-early 2015.
-
-Furthermore, the various container orchestration engines have not
-standardized on a container networking implementation and may never do so.
-For example, Flannel is the preferred container networking implementation
-for Kubernetes but not for Docker Swarm. Each container networking
-implementation comes with its own API abstractions, data model, tooling,
-etc. Natively supporting each container networking implementation can be a
-burden on the Magnum community and codebase.
-By supporting only a subset of container networking implementations, the
-project may not be widely adopted or may provide a suboptimal user
-experience.
-
-Lastly, Magnum has limited support for advanced container networking
-functionality. Magnum instantiates container networks behind the scenes
-through Heat templates, exposing little-to-no user configurability. Some
-users require the ability to customize their container environments,
-including networking details. However, networking needs to "just work" for
-users that require no networking customizations.
-
-Roles
------
-
-The following are roles that the Magnum Container Networking Model takes
-into consideration. Roles are an important reference point when creating
-user stories. This is because each role provides different functions and
-has different requirements.
-
-1. Cloud Provider (CP): Provides standard OpenStack cloud infrastructure
-   services, including the Magnum service.
-
-2. Container Service Provider (CSP): Uses Magnum to deliver
-   Containers-as-a-Service (CaaS) to users. CSPs are a consumer of CP
-   services and a CaaS provider to users.
-
-3. Users: Consume Magnum services to provision and manage clustered
-   container environments and deploy apps within the container clusters.
-
-The container ecosystem focuses on the developer user type. It is
-imperative that the Magnum Container Networking Model meets the needs of
-this user type.
-
-These roles are not mutually exclusive. For example:
-
-1. A CP can also be a CSP. In this case, the CP/CSP provisions and manages
-   standard OpenStack services, the Magnum service, and provides CaaS
-   services to users.
-
-2. A User can also be a CSP. In this case, the user provisions their own
-   baymodels, bays, etc. from the CP.
-
-Definitions
------------
-
-COE
-  Container Orchestration Engine
-
-Baymodel
-  An object that stores template information about the bay which is
-  used to create new bays consistently.
-
-Bay
-  A Magnum resource that includes at least one host to run containers on,
-  and a COE to manage containers created on hosts within the bay.
-
-Pod
-  The smallest deployable unit that can be created, scheduled, and
-  managed within Kubernetes.
-
-Additional Magnum definitions can be found in the Magnum Developer
-documentation [2]_.
-
-Use Cases
----------
-
-This document does not intend to address each use case. The use cases are
-provided as reference for the long-term development of the Magnum
-Container Networking Model.
-
-As a User:
-
-1. I need to easily deploy containerized apps in an OpenStack cloud.
-   My user experience should be similar to how I deploy containerized apps
-   outside of an OpenStack cloud.
-
-2. I need to have containers communicate with vm-based apps that use
-   OpenStack networking.
-
-3. I need the option to preserve the container's IP address so I can
-   manage containers by IP, not just ports.
-
-4. I need to block unwanted traffic to/from my containerized apps.
-
-5. I need the ability for my containerized apps to be highly available.
-
-6. I need confidence that my traffic is secure from other tenants'
-   traffic.
-
-As a CSP:
-
-1. I need to easily deploy a bay for consumption by users. The bay must
-   support the following:
-
-   A. One or more hosts to run containers.
-   B. The ability to choose between virtual or physical hosts to run
-      containers.
-   C. The ability to automatically provision networking to containers.
-
-2. I need to provide clustering options that support different
-   container/image formats and technologies.
-
-3. After deploying my initial cluster, I need the ability to provide
-   ongoing management, including:
-
-   A. The ability to add/change/remove networks that containers connect
-      to.
-   B. The ability to add/change/remove nodes within the cluster.
-
-4. I need to deploy a Bay without admin rights to OpenStack services.
-
-5. I need the freedom to choose different container networking tools and
-   techniques offered by the container ecosystem beyond OpenStack.
-
-As a CP:
-
-1. I need to easily and reliably add the Magnum service to my existing
-   OpenStack cloud environment.
-
-2. I need to easily manage (monitor, troubleshoot, etc.) the Magnum
-   service, including the ability to mirror ports to capture traffic
-   for analysis.
-
-3. I need to make the Magnum services highly available.
-
-4. I need to make Magnum services highly performant.
-
-5. I need to easily scale-out Magnum services as needed.
-
-6. I need Magnum to be robust regardless of failures within the container
-   orchestration engine.
-
-Proposed Changes
-================
-
-1. Currently, Magnum supports Flannel [6]_ as the only multi-host
-   container networking implementation. Although Flannel has become widely
-   accepted for providing networking capabilities to Kubernetes-based
-   container clusters, other networking tools exist and future tools may
-   develop.
-
-   This document proposes extending Magnum to support specifying a
-   container networking implementation through a combination of
-   user-facing baymodel configuration flags. Configuration parameters that
-   are common across Magnum or all networking implementations will be
-   exposed as unique flags. For example, a flag named network-driver can
-   be used to instruct Magnum which network driver to use for implementing
-   a baymodel container/pod network. Network driver examples may include:
-
-      flannel, weave, calico, midonet, netplugin, etc.
-
-   Here is an example of creating a baymodel that uses Flannel as the
-   network driver: ::
-
-      magnum baymodel-create --name k8sbaymodel \
-                             --image-id fedora-21-atomic-5 \
-                             --keypair-id testkey \
-                             --external-network-id 1hsdhs88sddds889 \
-                             --dns-nameserver 8.8.8.8 \
-                             --flavor-id m1.small \
-                             --docker-volume-size 5 \
-                             --coe kubernetes \
-                             --network-driver flannel
-
-   If no network-driver parameter is supplied by the user, the baymodel is
-   created using the default network driver of the specified Magnum COE.
-   Each COE must support a default network driver and each driver must
-   provide reasonable default configurations that allow users to
-   instantiate a COE without supplying labels. The default network driver
-   for each COE should be consistent with existing Magnum default
-   settings. Where current defaults do not exist, the defaults should be
-   consistent with upstream network driver projects.
-
-2. Each network driver supports a range of configuration parameters that
-   should be observed by Magnum. This document suggests using an attribute
-   named "labels" for supplying driver-specific configuration parameters.
-   Labels consist of one or more arbitrary key/value pairs.
-   Here is an example of using labels to change the default settings of
-   the Flannel network driver: ::
-
-      magnum baymodel-create --name k8sbaymodel \
-                             --image-id fedora-21-atomic-5 \
-                             --keypair-id testkey \
-                             --external-network-id ${NIC_ID} \
-                             --dns-nameserver 8.8.8.8 \
-                             --flavor-id m1.small \
-                             --docker-volume-size 5 \
-                             --coe kubernetes \
-                             --network-driver flannel \
-                             --labels flannel_network_cidr=10.0.0.0/8,\
-                                      flannel_network_subnetlen=22,\
-                                      flannel_backend=vxlan
-
-   With Magnum's current implementation, this document would support
-   labels for the Kubernetes COE type. However, labels are applicable
-   beyond Kubernetes, as the Docker daemon, images and containers now
-   support labels as a mechanism for providing custom metadata. The labels
-   attribute within Magnum should be extended beyond Kubernetes pods, so a
-   single mechanism can be used to pass arbitrary metadata throughout the
-   entire system. A blueprint [9]_ has been registered to expand the scope
-   of labels for Magnum. This document intends to adhere to the
-   expand-labels-scope blueprint.
-
-   Note: Support for daemon-labels was added in Docker 1.4.1. Labels for
-   containers and images were introduced in Docker 1.6.0.
-
-   If the --network-driver flag is specified without any labels, default
-   configuration values of the driver will be used by the baymodel. These
-   defaults are set within the Heat template of the associated COE. Magnum
-   should ignore label keys and/or values not understood by any of the
-   templates during the baymodel operation.
-
-   Magnum will continue to CRUD bays in the same way: ::
-
-      magnum bay-create --name k8sbay --baymodel k8sbaymodel --node-count 1
-
-3. Update python-magnumclient to understand the new Container Networking
-   Model attributes. The client should also be updated to support passing
-   the --labels flag according to the expand-labels-scope blueprint [9]_.
-
-4. Update the conductor template definitions to support the new Container
-   Networking Model attributes.
-
-5. Refactor Heat templates to support the Magnum Container Networking
-   Model. Currently, Heat templates embed Flannel-specific configuration
-   within top-level templates. For example, the top-level Kubernetes Heat
-   template [8]_ contains the flannel_network_subnetlen parameter. Network
-   driver specific configurations should be removed from all top-level
-   templates and instead be implemented in one or more template fragments.
-   As it relates to container networking, top-level templates should only
-   expose the labels and generalized parameters such as network-driver.
-   Heat templates, template definitions and definition entry points should
-   be suited for composition, allowing for a range of supported labels.
-   This document intends to follow the refactor-heat-templates
-   blueprint [3]_ to achieve this goal.
-
-6. Update unit and functional tests to support the new attributes of the
-   Magnum Container Networking Model.
-
-7. This spec will not add support for natively managing container
-   networks. Due to each network driver supporting different API
-   operations, this document suggests that Magnum not natively manage
-   container networks at this time and instead leave this job to native
-   tools. References [4]_ [5]_ [6]_ [7]_ provide additional details on
-   common label operations.
-
-8. Since implementing the expand-labels-scope blueprint [9]_ may take a
-   while, exposing network functionality through baymodel configuration
-   parameters should be considered as an interim solution.
-
-Alternatives
-------------
-
-1. Handle all networking configuration parameters, including labels,
-   within a configuration file instead of exposing the labels attribute
-   to the user.
-
-2. Only support a single networking implementation such as Flannel.
-   Flannel is currently supported for the Kubernetes COE type. It can be
-   ported to support the Swarm COE type.
-
-3. Add support for managing container networks. This will require adding
-   abstractions for each supported network driver or creating an
-   abstraction layer that covers all possible network drivers.
-
-4. Use the Kuryr project [10]_ to provide networking to Magnum
-   containers. Kuryr currently contains no documentation or code, so this
-   alternative is highly unlikely if the Magnum community requires a
-   pluggable container networking implementation in the near future.
-   However, Kuryr could become the long-term solution for container
-   networking within OpenStack. A decision should be made by the Magnum
-   community whether to move forward with Magnum's own container
-   networking model or to wait for Kuryr to mature. In the meantime, this
-   document suggests the Magnum community become involved in the Kuryr
-   project.
-
-Data Model Impact
------------------
-
-This document adds the labels and network-driver attributes to the
-baymodel database table. A migration script will be provided to support
-the attributes being added. ::
-
- +----------------+-----------------+----------------------------------------------+
- | Attribute      | Type            | Description                                  |
- +================+=================+==============================================+
- | labels         | JSONEncodedDict | One or more arbitrary key/value pairs        |
- +----------------+-----------------+----------------------------------------------+
- | network-driver | string          | Container networking backend implementation  |
- +----------------+-----------------+----------------------------------------------+
-
-REST API Impact
----------------
-
-This document adds the labels and network-driver attributes to the
-BayModel API class. ::
-
- +----------------+-----------------+----------------------------------------------+
- | Attribute      | Type            | Description                                  |
- +================+=================+==============================================+
- | labels         | JSONEncodedDict | One or more arbitrary key/value pairs        |
- +----------------+-----------------+----------------------------------------------+
- | network-driver | string          | Container networking backend implementation  |
- +----------------+-----------------+----------------------------------------------+
-
-Security Impact
----------------
-
-Supporting more than one network driver increases the attack
-surface of Magnum.
-
-Notifications Impact
---------------------
-
-None
-
-Other End User Impact
----------------------
-
-Most end users will never use the labels configuration flag and will
-simply use the default network driver and associated configuration
-options. For those who wish to customize their container networking
-environment, it will be important to understand what network-driver and
-labels are supported, along with their associated configuration options,
-capabilities, etc.
-
-Performance Impact
-------------------
-
-Performance will depend upon the chosen network driver and its associated
-configuration. For example, when creating a baymodel with the
-"--network-driver flannel" flag, Flannel's default configuration will be
-used. If the default for Flannel is an overlay networking technique
-(i.e. VXLAN), then networking performance will be less than if Flannel
-used the host-gw configuration that does not perform additional packet
-encapsulation to/from containers. If additional performance is required
-when using this driver, Flannel's host-gw configuration option could be
-exposed by the associated Heat template and instantiated through the
-labels attribute.
-
-Other Deployer Impact
----------------------
-
-Currently, container networking and OpenStack networking are different
-entities. Since no integration exists between the two, deployers/operators
-will be required to manage each networking environment individually.
-However, Magnum users will continue to deploy baymodels, bays, containers,
-etc. without having to specify any networking parameters. This will be
-accomplished by setting reasonable default parameters within the Heat
-templates.
-
-Developer impact
-----------------
-
-None
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-Daneyon Hansen (danehans)
-
-Other contributors:
-Ton Ngo (Tango)
-Hongbin Lu (hongbin)
-
-Work Items
-----------
-
-1. Extend the Magnum API to support new baymodel attributes.
-2. Extend the Client API to support new baymodel attributes.
-3. Extend baymodel objects to support new baymodel attributes. Provide a
-   database migration script for adding attributes.
-4. Refactor Heat templates to support the Magnum Container Networking
-   Model.
-5. Update Conductor template definitions and definition entry points to
-   support Heat template refactoring.
-6. Extend unit and functional tests to support new baymodel attributes.
-
-Dependencies
-============
-
-Although adding support for these new attributes does not depend on the
-following blueprints, it's highly recommended that the Magnum Container
-Networking Model be developed in concert with the blueprints to maintain
-development continuity within the project.
-
-1. Common Plugin Framework Blueprint [1]_.
-
-2. Expand the Scope of Labels Blueprint [9]_.
-
-3. Refactor Heat Templates, Definitions and Entry Points Blueprint [3]_.
-
-Testing
-=======
-
-Each commit will be accompanied by unit tests. There will also be
-functional tests which will be used as part of a cross-functional gate
-test for Magnum.
-
-Documentation Impact
-====================
-
-The Magnum Developer Quickstart document will be updated to support the
-configuration flags introduced by this document. Additionally, background
-information on how to use these flags will be included.
-
-References
-==========
-
-.. [1] https://blueprints.launchpad.net/magnum/+spec/common-plugin-framework
-.. [2] http://docs.openstack.org/developer/magnum/
-.. [3] https://blueprints.launchpad.net/magnum/+spec/refactor-heat-templates
-.. [4] https://github.com/docker/libnetwork/blob/master/docs/design.md
-.. [5] https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/design/networking.md
-.. [6] https://github.com/coreos/flannel
-.. [7] https://github.com/coreos/rkt/blob/master/Documentation/networking.md
-.. [8] https://github.com/openstack/magnum/blob/master/magnum/templates/kubernetes/kubecluster.yaml
-.. [9] https://blueprints.launchpad.net/magnum/+spec/expand-labels-scope
-.. [10] https://github.com/openstack/kuryr
diff --git a/specs/container-volume-integration-model.rst b/specs/container-volume-integration-model.rst
deleted file mode 100644
index ba5c3482..00000000
--- a/specs/container-volume-integration-model.rst
+++ /dev/null
@@ -1,500 +0,0 @@
-..
-   This work is licensed under a Creative Commons Attribution 3.0 Unported
-   License.
-
-   http://creativecommons.org/licenses/by/3.0/legalcode
-
-=========================================
-Magnum Container Volume Integration Model
-=========================================
-
-Launchpad Blueprint:
-
-https://blueprints.launchpad.net/magnum/+spec/magnum-integrate-with-cinder
-
-Storage is a key part of any computing system. Containers in particular
-have the interesting characteristic that local storage by default is
-ephemeral: any changes to the file system disappear when the container is
-deleted. This introduces the need for persistent storage to retain and
-share data between containers, and this is currently an active area of
-development in all container orchestration engines (COEs).
-
-As the component in OpenStack for managing COEs, Magnum must fully enable
-the features for persistent storage in the COEs. To achieve this goal, we
-propose in this specification to generalize the process for utilizing
-persistent storage with containers so that it is applicable for different
-bay types. Despite the complexity, we aim to maintain a good user
-experience through a simple abstraction for working with various volume
-capabilities. For the rest of this specification, we will use the term
-Volume to refer to persistent storage, and Volume Driver as the plugin in
-a COE to support the particular persistent storage.
-
-Problem Description
-===================
-
-Containers require full life cycle management such as create, run, stop,
-delete etc., and a key operation is to manage the data - making the data
-persistent, reusing the data, sharing data between containers, and so on.
-In this area, the support for container volumes is undergoing rapid change
-to bring more integration with open source software and third party
-storage solutions.
-
-Clear evidence of this growth is the many plugin volume drivers [1]_ [4]_
-such as NFS, GlusterFS, EBS, etc. They provide different functionality,
-use different storage backends and have different requirements. The COEs
-are naturally motivated to be flexible and allow as many choices as
-possible for the users with respect to the storage backend. Since Magnum's
-role is to support the COEs within OpenStack, the goal is to be
-transparent and enable these same storage backends for the COEs through
-the COE lifecycle operations.
-
-Currently, Magnum provides limited support for managing container volumes.
-The only option available is to specify the docker-volume-size for a
-pre-allocated block storage in the COE to host the containers. Magnum
-instantiates container volumes through Heat templates, exposing no other
-mechanism to configure and operate on volumes. In practice, some users
-require the ability to manage volumes easily in the COEs.
-
-Note that we are not proposing to create a new volume management interface
-in Magnum. After the users create the baymodel and bays, we assume that
-the users would manage the volumes through existing techniques:
-
-1. Log in to the COE, use COE specific CLI or GUI to manage volumes.
-
-2. Use native tools to manage volumes.
-
-The initial implementation will focus on OpenStack Cinder integration; as
-other alternatives become available, contributors are welcome through
-3rd-party maintained projects.
-
-
-Definitions
------------
-
-COE
-  Container Orchestration Engine
-
-Baymodel
-  An object that stores template information about the bay which is
-  used to create new bays consistently.
-
-Bay
-  A Magnum resource that includes at least one host to run containers on,
-  and a COE to manage containers created on hosts within the bay.
-
-Pod
-  The smallest deployable unit that can be created, scheduled, and
-  managed within Kubernetes.
-
-Volume
-  Storage that is persistent.
-
-Volume plugin
-  COE specific code that supports the functionality of a type of volume.
-
-Additional Magnum definitions can be found in the Magnum Developer
-documentation [7]_.
-
-Use Cases
-----------
-
-This document does not intend to address all use cases. We list below a
-number of use cases for 3 different roles; they should be useful as
-reference for the long-term development of the Magnum Container Volume
-Integration.
-
-As a User:
-
-As mentioned above, our goal is to preserve the user experience specific
-to the COE in managing the volumes. Therefore, we expect the use cases for
-the users will be fulfilled by the COEs themselves; Magnum will simply
-ensure that the necessary support is in place.
-
-1. I need to easily create volumes for containers to use as persistent
-   data stores.
-
-2. I need the ability to create and mount a data volume container for
-   cross container sharing.
-
-3. I need to mount a host directory as a data volume.
-
-4. I need to easily attach a known volume to a container to use the
-   existing data.
-
-5. I need the ability to delete the volume.
-
-6. I need to list and view the details of the volume.
-
-7. I need to modify the volume.
-
-
-As a CSP:
-
-1. I need to easily deploy a bay for consumption by users. The bay must
-   support the following:
-
-   A. One or more hosts to run containers.
-   B. The ability to choose between virtual or physical hosts to
-      run containers.
-   C. The ability to automatically enable volume plugins for containers.
-
-2. I need to provide clustering options that support different volume
-   plugins per COE.
-
-3. After deploying my initial cluster, I need the ability to provide
-   lifecycle management, including:
-
-   A. The ability to add/remove volumes that containers use.
-   B. The ability to add/remove nodes within the cluster with the
-      necessary adjustment to the volumes.
-
-As a CP:
-
-1. I need to easily and reliably add the Magnum service to my existing
-   OpenStack cloud environment.
-
-2. I need to make the Magnum services highly available.
-
-3. I need to make Magnum services highly performant.
-
-4. I need to easily scale-out Magnum services as needed.
-
-
-Proposed Changes
-================
-
-We propose extending Magnum as follows.
-
-
-1. The new attribute volume-driver for a baymodel specifies the volume
-   backend driver to use when deploying a bay.
-
-   Volume drivers may include:
-
-      rexray, flocker, nfs, glusterfs, etc.
-
-   Here is an example of creating a Docker Swarm baymodel that uses
-   rexray [5]_ [6]_ as the volume driver: ::
-
-      magnum baymodel-create --name swarmbaymodel \
-                             --image-id fedora-21-atomic-5 \
-                             --keypair-id testkey \
-                             --external-network-id 1hsdhs88sddds889 \
-                             --dns-nameserver 8.8.8.8 \
-                             --flavor-id m1.small \
-                             --docker-volume-size 5 \
-                             --coe swarm \
-                             --network-driver flannel \
-                             --volume-driver rexray
-
-   When a Swarm bay is created with this bay model, the REX-Ray storage
-   subsystem will be installed, configured and started on the Swarm nodes,
-   and the REX-Ray volume plugin will be registered in Docker. When a
-   container is created with rexray as the volume driver, the container
-   will have full access to the REX-Ray capabilities such as creating,
-   mounting, and deleting volumes [6]_.
-   REX-Ray in turn will interface
-   with Cinder to manage the volumes in OpenStack.
-
-   Here is an example of creating a Kubernetes baymodel that uses
-   Cinder [2]_ [3]_ as the volume driver: ::
-
-      magnum baymodel-create --name k8sbaymodel \
-                             --image-id fedora-21-atomic-5 \
-                             --keypair-id testkey \
-                             --external-network-id 1hsdhs88sddds889 \
-                             --dns-nameserver 8.8.8.8 \
-                             --flavor-id m1.small \
-                             --docker-volume-size 5 \
-                             --coe kubernetes \
-                             --network-driver flannel \
-                             --volume-driver cinder
-
-   When the Kubernetes bay is created using this bay model, the kubelet
-   will be configured so that an existing Cinder volume can be mounted in
-   a pod by specifying the volume ID in the pod manifest as follows: ::
-
-      volumes:
-      - name: mysql-persistent-storage
-        cinder:
-          volumeID: bd82f7e2-wece-4c01-a505-4acf60b07f4a
-          fsType: ext4
-
-   Here is an example of creating a Mesos baymodel that uses rexray as the
-   volume driver: ::
-
-      magnum baymodel-create --name mesosbaymodel \
-                             --image-id ubuntu-mesos \
-                             --keypair-id testkey \
-                             --external-network-id 1hsdhs88sddds889 \
-                             --dns-nameserver 8.8.8.8 \
-                             --flavor-id m1.small \
-                             --coe mesos \
-                             --network-driver docker \
-                             --volume-driver rexray
-
-   When the Mesos bay is created using this bay model, it will be
-   configured so that an existing Cinder volume can be mounted in a
-   container by setting the parameters to mount the Cinder volume in the
-   JSON file: ::
-
-      "parameters": [
-         { "key": "volume-driver", "value": "rexray" },
-         { "key": "volume", "value": "redisdata:/data" }
-      ]
-
-   If no volume-driver parameter is supplied by the user, the baymodel is
-   created using the default volume driver of the particular COE. Magnum
-   will provide a default volume driver for each COE, as well as a
-   reasonable default configuration for each driver, so that users can
-   instantiate a COE without supplying a volume driver and associated
-   labels. Generally the defaults should be consistent with upstream
-   volume driver projects.
-
-2. Each volume driver supports a range of configuration parameters that
-   are handled by the "labels" attribute.
-
-   Labels consist of one or more arbitrary key/value pairs. Here is an
-   example of using labels to choose the "storage-provider" for the rexray
-   volume driver: ::
-
-      magnum baymodel-create --name k8sbaymodel \
-                             --image-id fedora-21-atomic-5 \
-                             --keypair-id testkey \
-                             --external-network-id ${NIC_ID} \
-                             --dns-nameserver 8.8.8.8 \
-                             --flavor-id m1.small \
-                             --docker-volume-size 5 \
-                             --coe kubernetes \
-                             --volume-driver rexray \
-                             --labels storage-provider=openstack \
-                             [, key2=value2...]
-
-   If the --volume-driver flag is specified without any labels, default
-   configuration values of the driver will be used by the baymodel.
-
-   Magnum will validate the labels together with the driver specified
-   before creating the bay, and will return an error if the validation
-   fails.
-
-   Magnum will continue to CRUD bays in the same way: ::
-
-      magnum bay-create --name k8sbay --baymodel k8sbaymodel --node-count 1
-
-3. Update python-magnumclient to handle the new container volume-driver
-   attributes.
-
-4. Update the conductor template definitions to support the new container
-   volume-driver model attributes.
-
-5. Refactor Heat templates to support the Magnum volume driver plugin.
-   Configurations specific to volume drivers should be implemented in one
-   or more template fragments. Top-level templates should only expose the
-   labels and generalized parameters such as volume-driver.
-
-   Heat templates, template definitions and definition entry points should
-   be designed for composition, allowing for a range of supported labels.
-
-6. Update unit and functional tests to support the new attributes of the
-   Magnum container volume driver.
-
-7. Preserve the user experience by ensuring that any operation on volumes
-   will be identical between a COE deployed by Magnum and a COE deployed
-   by other methods.
-
-
-Alternatives
-------------
-
-1. Without the support proposed, the user will need to manually enable and
-   configure the volume plugin. This will require the user to log into the
-   nodes in the cluster and understand the low level infrastructure of the
-   cluster as deployed by the heat templates.
-2. We can add full support for managing container volumes in the Magnum
-   user interface itself. This will require adding abstractions for each
-   supported COE volume plugin driver or creating an abstraction layer
-   that covers all possible COE volume drivers.
-
-Data Model Impact
------------------
-
-This document adds the volume-driver attribute to the baymodel database
-table. A migration script will be provided to support the attribute being
-added. ::
-
- +---------------+--------+------------------------------------------+
- | Attribute     | Type   | Description                              |
- +===============+========+==========================================+
- | volume-driver | string | Container volume backend implementation  |
- +---------------+--------+------------------------------------------+
-
-REST API Impact
----------------
-
-This document adds the volume-driver attribute to the BayModel API
-class. ::
-
- +---------------+--------+------------------------------------------+
- | Attribute     | Type   | Description                              |
- +===============+========+==========================================+
- | volume-driver | string | Container volume backend implementation  |
- +---------------+--------+------------------------------------------+
-
-Security Impact
----------------
-
-Supporting volume drivers can potentially increase the attack surface
-on containers.
-
-Notifications Impact
---------------------
-
-None
-
-Other End User Impact
----------------------
-
-There is no impact if the user does not use a volume driver. We anticipate
-that most users would not use the labels for volumes and would simply use
-the default volume driver and associated configuration options. For those
-who wish to customize their container volume driver environment, it will
-be important to understand what volume-driver and labels are supported,
-along with their associated configuration options, capabilities, etc.
-
-Performance Impact
-------------------
-
-There is no impact if the user does not use a volume driver. When a volume
-driver is used, the performance will depend upon the specific volume
-driver and its associated storage backends. For example, Kubernetes
-supports Cinder and awsEBS; the two types of volumes can have different
-performance.
-
-An example of backend-dependent performance is a Docker Swarm bay with
-"--volume-driver rexray", where the rexray driver's storage provider is
-OpenStack Cinder. The resulting performance for containers may vary
-depending on the storage backends. As listed in [8]_, Cinder supports many
-storage drivers.
-Besides this, different container volume drivers can also cause
-performance variance.
-
-
-High-Availability Impact
-------------------------
-
-+-----------------+--------------------+--------------------------+
-| COE             | Master HA          | Pod/Container/App HA     |
-+=================+====================+==========================+
-| Kubernetes      | No                 | Yes                      |
-+-----------------+--------------------+--------------------------+
-| Docker Swarm    | No                 | Yes                      |
-+-----------------+--------------------+--------------------------+
-| Mesos           | No                 | No                       |
-+-----------------+--------------------+--------------------------+
-
-"No" means that volumes do not affect high availability.
-"Yes" means that volumes affect high availability.
-
-Kubernetes does support pod high availability through the replication
-controller; however, this does not work when a pod with a volume attached
-fails. Refer to the link [11]_ for details.
-
-Docker Swarm does not support rescheduling containers when a node fails,
-so volumes cannot be automatically detached by the volume driver. Refer to
-the link [12]_ for details.
-
-Mesos supports application high availability when a node fails: the
-application would be started on a new node, and volumes can be
-automatically attached to the new node by the volume driver.
-
-Other Deployer Impact
----------------------
-
-Currently, both the Kubernetes and Docker communities support a number of
-volume plugins. The changes proposed will enable these volume plugins in
-Magnum. However, Magnum users will be able to continue to deploy
-baymodels, bays, containers, etc. without having to specify any parameters
-for volumes. This will be accomplished by setting reasonable default
-parameters within the Heat templates.
-
-Developer impact
-----------------
-
-None
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-
-- Kai Qiang Wu (Kennan)
-
-Other contributors:
-
-- Qun Wang (wangqun)
-- Ton Ngo (Tango)
-
-
-Work Items
-----------
-
-1. Extend the Magnum API to support new baymodel attributes.
-2. Extend the Client API to support new baymodel attributes.
-3. Extend baymodel objects to support new baymodel attributes. Provide a
-   database migration script for adding attributes.
-4. Refactor Heat templates to support the Magnum container volume driver.
-5. Update Conductor template definitions and definition entry points to
-   support Heat template refactoring.
-6. Extend unit and functional tests to support new baymodel attributes.
-7. Document how to use the volume drivers with examples.
-
-Dependencies
-============
-
-Although adding support for these new attributes does not depend on the
-following blueprint, it's highly recommended that the Magnum Container
-Volume Integration Model be developed in concert with it to maintain
-development continuity within the project:
-https://blueprints.launchpad.net/magnum/+spec/ubuntu-image-build
-
-Kubernetes with Cinder support needs Kubernetes version >= 1.1.1.
-Swarm needs version >= 1.8.3, as Kubernetes 1.1.1 upgraded to that
-version.
-
-Testing
-=======
-
-Each commit will be accompanied by unit tests. There will also be
-functional tests which will be used as part of a cross-functional gate
-test for Magnum.
-
-Documentation Impact
-====================
-
-The Magnum Developer Quickstart document will be updated to support the
-configuration flags introduced by this document. Additionally, background
-information on how to use these flags will be included.
-
-References
-==========
-
[1] http://kubernetes.io/v1.1/docs/user-guide/volumes.html -.. [2] http://kubernetes.io/v1.1/examples/mysql-cinder-pd/ -.. [3] https://github.com/kubernetes/kubernetes/tree/master/pkg/volume/cinder -.. [4] http://docs.docker.com/engine/extend/plugins/ -.. [5] https://github.com/emccode/rexray -.. [6] http://rexray.readthedocs.org/en/stable/user-guide/storage-providers/openstack -.. [7] http://docs.openstack.org/developer/magnum/ -.. [8] http://docs.openstack.org/liberty/config-reference/content/section_volume-drivers.html -.. [9] http://docs.openstack.org/admin-guide-cloud/blockstorage_multi_backend.html# -.. [10] http://docs.openstack.org/user-guide-admin/dashboard_manage_volumes.html -.. [11] https://github.com/kubernetes/kubernetes/issues/14642 -.. [12] https://github.com/docker/swarm/issues/1488 diff --git a/specs/containers-service.rst b/specs/containers-service.rst deleted file mode 100644 index ca48b393..00000000 --- a/specs/containers-service.rst +++ /dev/null @@ -1,400 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================== -Containers Service -================== - -Launchpad blueprint: - -https://blueprints.launchpad.net/nova/+spec/containers-service - -Containers share many features in common with Nova instances. For the common -features, virt drivers for Nova can be used to surface basic instance -functionality. For features that go beyond what can be naturally fit within -a virt driver, we propose a new API service that allows for advanced features -to be added without conflating the worlds of instances and containers. - -Some examples of containers specific features are setting of shell environment -variables, and accepting a shell command to execute at runtime. Capturing the -STDIO of the process(es) within a container, and tracking the return status -of processes are all beyond the scope of what was contemplated for Nova. All -of these features will be implemented in the Containers Service. - - -Problem description -=================== -Container technology is rapidly gaining popularity as a way to bundle and -deploy applications. Recognizing and adapting to this trend will position -OpenStack to be useful not only to clouds that employ bare metal and virtual -machine instances, but can remain competitive in offering container services -as well. - -Nova's concepts of an instance, and the actions that may be taken on it do not -match completely with containers. - -Use cases ---------- -1. App Consolidation. End-user wants to run multiple small applications in - separate operating system environments, but wants to optimize for efficiency - to control hosting costs. Each application belongs to the same tenant, so - security isolation between applications is nice-to-have but not critical. - Isolation is desired primarily for simplified management of the execution - environment for each application. -2. App Portability. End-user wants to create a single container image, and - deploy the same image to multiple hosting environments, including OpenStack. - Other environments may include local servers, dedicated servers, private - clouds, and public clouds. Switching environments requires passing database - connection strings by environment variables at the time a container starts - to allow the application to use the services available in each environment - without changing the container image. -3. Docker Compatibility. 
End-user has a Dockerfile used to build an application - and its runtime environment and dependencies in a Docker container image. - They want an easy way to run the Docker resulting image on an OpenStack - cloud. -4. LXC Compatibility. End-user wants an easy way to remotely create multiple - LXC containers within a single Nova instance. -5. OpenVZ Compatibility. End-user wants an easy way to remotely create multiple - OpenVZ containers within a single Nova instance. -6. Containers-Centric World View. End-user wants to communicate with a single - OpenStack API, and request the addition of containers, without the need to - be concerned with keeping track of how many containers are already running - on a given Nova instance, and when more need to be created. They want to - simply create and remove containers, and allow the appropriate resource - scheduling to happen automatically. -7. Platform Integration. Cloud operator already has an OpenStack cloud, and - wants to add a service/application centric management system on top. - Examples of such systems are Cloud Foundry, Kubernetes, Apache Mesos, etc. - The selected system is already Docker compatible. Allow this cloud operator - easy integration with OpenStack to run applications in containers. The - Cloud Operator now harnesses the power of both the management system, and - OpenStack, and does not need to manage a second infrastructure for his/her - application hosting needs. All details involving the integration of - containers with Nova instances is managed by OpenStack. -8. Container network. End-user wants to define a custom overlay network for - containers, and wants to have admin privilege to manage the network - topology. Building a container network can decouple application deployment - and management from the underlying network infrastructure, and enable - additional usage scenario, such as (i) software-defined networking, and - (ii) extending the container network (i.e. connecting various resources from - multiple hosting environments). End-users want a single service that could - help them build the container network, and dynamically modify the network - topology by adding or removing containers to or from the network. -9. Permit secure use of native REST APIs. Provide two models of operation with - Magnum. The first model allows Magnum to manage the lifecycle of Pods, - ReplicationControllers, and Services. The second model allows end-users to - manage the lifecycle of Pods, ReplicationControllers, and Services by - providing direct secure access to the native ReST APIs in Kubernetes and - possibly Docker. - -Long Term Use Cases -------------------- -These use cases have been identified by the community as important, but -unlikely to be tackled in short term (especially prior to incubation). We wish -to adapt to these use cases in long term, but this is not a firm project -commitment. - -1. Multi-region/multi-cloud support. End-user wants to deploy applications to - multiple regions/clouds, and dynamically relocate deployed applications - across different regions/clouds. In particular, they want a single service - that could help them (i) provision nodes from multiple regions/clouds, thus - running containers on top of them, and (ii) dynamically relocate containers - (e.g. through container migration) between nodes regardless of the - underlying infrastructure. - -Proposed change -=============== -Add a new API service for CRUD and advanced management of containers. 
-If cloud operators only want to offer basic instance features for their -containers, they may use nova with an alternate virt-driver, such as -libvirt/lxc or nova-docker. For those wanting a full-featured container -experience, they may offer the Containers Service API as well, in combination -with Nova instances that contain an OpenStack agent that connects to the -containers service through a security controlled agent (daemon) that allows -the OpenStack control plane to provision and control containers running on -Compute Hosts. - -The Containers Service will call the Nova API to create one or more Nova -instances inside which containers will be created. The Nova instances may -be of any type, depending on the virt driver(s) chosen by the cloud operator. -This includes bare-metal, virtual machines, containers, and potentially other -instance types. - -This allows the following configurations of containers in OpenStack. - -* Containers in Virtual Machine Instances -* Containers in Bare Metal Instances -* Containers in Container Instances (nested) - -The concept of nesting containers is currently possible if the parent container -runs in privileged mode. Patches to the linux kernel are being developed to -allow nesting of non-privileged containers as well, which provides a higher -level of security. - -The spirit of this plan aims to duplicate as little as possible between Nova -and the Containers Service. Common components like the scheduler are expected -to be abstracted into modules, such as Gantt that can be shared by multiple -projects. Until Gantt is ready for use by the Containers Service, we will -implement only two provisioning schemes for containers: - -1. Create a container on a specified instance by using a nova instance guid. -2. Auto-create instances (applies only until the Gantt scheduler is used) - 2.1. Fill them sequentially until full. - 2.2. Remove them automatically when they become empty. - -The above orchestration will be implemented using Heat. This requires some -kind of hypervisor painting (such as host aggregates) for security reasons. - -The diagram below offers an overview of the system architecture. The OSC box -indicates an OpenStack client, which will communicate with the Containers -Service through a REST API. The containers service may silently create Nova -instances if one with enough capacity to host the requested container is not -already known to the Containers service. The containers service will maintain -a database "Map" of containers, and what Nova instance each belongs to. Nova -creates instances. Instances are created in Nova, and containers belong only -to the Containers Service, and run within a Nova instance. If the instance -includes the agent software "A", then it may be included in the inventory of -the Containers service. Instances that do not contain an agent may not interact -with the Containers Service, and can be controlled only by a Nova virt driver. 
-
-::
-
-                            +---------+
-                            |   OSC   |
-                            +----+----+
-                                 |
-                            +----+----+
- +-------- Nova -------+  +-+  REST   +-- Containers -+
- |                     |  | +---------+    Service    |
- |                     |  |                           |
- |           +-------+ +--+ +-----+                   |
- |           | Gantt | |  | | Map |                   |
- |           +-------+ |  | +-----+                   |
- |                     |  |                           |
- +-----------+---------+  +---------------+-----------+
-             |                            |
- +-----------+----+ Compute Host ---------|-----------+
- |                                    +---+---+       |
- |                               +----+ Relay +---+   |
- |                               |    +-------+   |   |
- |                               |                |   |
- | +-- Instance --+ +-- Instance |-+ +-- Instance |-+ |
- | |              | |            | | |            | | |
- | |              | |        +---+ | |        +---+ | |
- | |              | |        |   | | |        |   | | |
- | |              | |        | A | | |        | A | | |
- | |              | |        |   | | |        |   | | |
- | |              | |        +---+ | |        +---+ | |
- | |              | |              | |              | |
- | |              | | +---+  +---+ | | +---+  +---+ | |
- | |              | | |   |  |   | | | |   |  |   | | |
- | |              | | | C |  | C | | | | C |  | C | | |
- | |              | | |   |  |   | | | |   |  |   | | |
- | |              | | +---+  +---+ | | +---+  +---+ | |
- | |              | |              | |              | |
- | +--------------+ +--------------+ +--------------+ |
- |                                                    |
- +----------------------------------------------------+
-
- +---+
- |   |
- | A | = Agent
- |   |
- +---+
-
- +---+
- |   |
- | C | = Container
- |   |
- +---+
-
-
-Design Principles
------------------
-1. Leverage existing OpenStack projects for what they are good at. Do not
-   duplicate functionality, or copy code that can be otherwise accessed through
-   API calls.
-2. Keep modifications to Nova to a minimum.
-3. Make the user experience for end users simple and familiar.
-4. Allow for implementation of all features containers are intended to offer.
-
-
-Alternatives
-------------
-
-1. Extending Nova's existing feature set to offer container features
-
-   1.1. Container features don't fit into Nova's idea of compute (VM/Server)
-
-2. A completely separate containers service forked from Nova.
-
-   2.1. Would result in large overlap and duplication in features and code
-
-
-Data model impact
------------------
-For Nova, none. All new data planned will be in the Containers Service.
-
-
-REST API impact
----------------
-For Nova, none. All new API calls will be implemented in the Containers
-Service. The OpenStack Containers Service API will be a superset of the
-functionality offered by the `Docker Remote API: `_,
-with additions to make it suitable for general use regardless of the backend
-container technology used, and to be compatible with OpenStack multi-tenancy
-and Keystone authentication.
-
-Specific Additions:
-
-1. Support for the X-Auth-Project-Id HTTP request header to allow for
-   multi-tenant use.
-2. Support for the X-Auth-Token HTTP request header to allow for authentication
-   with keystone.
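-
-As a non-normative illustration of these additions, a client request to the
-proposed API might look like the following (the endpoint URL, path, and
-header values are hypothetical placeholders, not part of this design): ::
-
-    import requests
-
-    # Both headers below are required; omitting either one yields
-    # a 401 Unauthorized response, as described next.
-    resp = requests.get(
-        'https://containers.example.com/v1/containers',
-        headers={
-            'X-Auth-Project-Id': 'example-project-id',
-            'X-Auth-Token': 'example-keystone-token',
-        })
-    print(resp.status_code)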
- -If either of the above headers are missing, a 401 Unauthorized response will -be generated. - -Docker CLI clients may communicate with a Swarmd instance that is configured -to use the OpenStack Containers API as the backend for libswarm. This will -allow for tool compatibility with the Docker ecosystem using the officially -supported means for integration of a distributed system. - -The scope of the full API will cause this spec to be too long to review, so -the intent is to deal with the specific API design as a series of Gerrit -reviews that submit API code as Not Implemented stubs with docstrings that -clearly document the design, so allow for approval, and further implementation. - -Security impact ---------------- -Because Nova will not be changed, there should be no security impacts to Nova. -The Containers Service implementation, will have the following security related -issues: - -* Need to authenticate against keystone using python-keystoneclient. -* A trust token from Nova will be needed in order for the Containers Service - to call the Nova API on behalf of a user. -* Limits must be implemented to control resource consumption in accordance with - quotas. -* Providing STDIO access may generate a considerable amount of network chatter - between containers and clients through the relay. This could lead to - bandwidth congestion at the relays, or API nodes. An approach similar to - how we handle serial console access today will need to be considered to - mitigate this concern. - -Using containers implies a range of security considerations for cloud -operators. These include: - -* Containers in the same instance share an operating system. If the kernel is - exploited using a security vulnerability, processes in once container may - escape the constraints of the container and potentially access other - resources on the host, including contents of other containers. -* Output of processes may be persisted by the containers service in order to - allow asynchronous collection of exit status, and terminal output. Such - content may include sensitive information. Features may be added to mitigate - the risk of this data being replicated in log messages, including errors. -* Creating containers usually requires root access. This means that the Agent - may need to be run with special privileges, or be given a method to - escalate privileges using techniques such as sudo. -* User provided data is passed through the API. This will require sensible - data input validation. - - -Notifications impact --------------------- - -Contemplated features (in subsequent release cycles): - -* Notify the end user each time a Nova instance is created or deleted by - the Containers service, if (s)he has registered for such notifications. -* Notify the user each on CRUD of containers containing start and end - notifications. (compute.container.create/delete/etc) -* Notify user periodically of existence of container service managed - containers (ex compute.container.exists) - - -Other end user impact ---------------------- - -The user interface will be a REST API. On top of that API will be an -implementation of the libswarm API to allow for tools designed to use Docker -to treat OpenStack as an upstream system. - - -Performance Impact ------------------- - -The Nova API will be used to create instances as needed. If the Container to -Instance ratio is 10, then the Nova API will be called at least once for every -10 calls to the Containers Service. 
Instances that are left empty will be -automatically deleted, so in the example of a 10:1 ratio, the Nova API will be -called to perform a delete for every 10 deletes in the Container Service. -Depending on the configuration, the ratio may be as low as 1:1. -The Containers Service will only access Nova through its API, not by accessing -its database. - - - -Other deployer impact ---------------------- - -Deployers may want to adjust the default flavor used for Nova Instances created -by the Containers Service. - -There should be no impact on users of prior releases, as this introduces a new -API. - -Developer impact ----------------- - -Minimal. There will be minimal changes required in Nova, if any. - - -Implementation -============== - - -Assignee(s) ------------ - -Primary assignee: -aotto - -Other contributors: -andrew-melton -ewindisch - - -Work Items ----------- - -1. Agent -2. Relay -3. API Service -4. IO Relays - - -Dependencies -============ - -1. -2. Early implementations may use libswarm, or a python port of libswarm to - implement Docker API compatibility. - -Testing -======= - -Each commit will be accompanied with unit tests, and Tempest functional tests. - - -Documentation Impact -==================== - -A set of documentation for this new service will be required. - - -References -========== - -* Link to high level draft proposal from the Nova Midcycle Meetup for Juno: - `PDF `_ -* `Libswarm Source `_ diff --git a/specs/create-trustee-user-for-each-bay.rst b/specs/create-trustee-user-for-each-bay.rst deleted file mode 100644 index bdef4866..00000000 --- a/specs/create-trustee-user-for-each-bay.rst +++ /dev/null @@ -1,186 +0,0 @@ -================================== -Create a trustee user for each bay -================================== - -https://blueprints.launchpad.net/magnum/+spec/create-trustee-user-for-each-bay - -Some services which are running in a bay need to access OpenStack services. -For example, Kubernetes load balancer [1]_ needs to access Neutron. Docker -registry [2]_ needs to access Swift. In order to access OpenStack services, -we can create a trustee for each bay and delegate a limited set of rights to -the trustee. [3]_ and [4]_ give a brief introduction to Keystone's trusts -mechanism. - -Problem description -=================== - -Some services which are running in a bay need to access OpenStack services, -so we need to pass user credentials into the vms. - -Use Cases ---------- - -1. Kubernetes load balancer needs to access Neutron [1]_. -2. For persistent storage, Cloud Provider needs to access Cinder to - mount/unmount block storage to the node as volume [5]_. -3. TLS cert is generated in the vms and need to be uploaded to Magnum [6]_ and - [7]_. -4. Docker registry needs to access Swift [2]_. - -Project Priority ----------------- - -High - -Proposed change -=============== -When a user (the "trustor") wants to create a bay, steps for trust are as -follows. - -1. Create a new service account (the "trustee") without any role in a domain - which is dedicated for trust. Without any role, the service account can do - nothing in Openstack. - -2. Define a trust relationship between the trustor and the trustee. The trustor - can delegate a limited set of roles to the trustee. We can add an option - named trust_roles in baymodel. Users can add roles which they want to - delegate into trust_roles. If trust_roles is not provided, we delegate all - the roles to the trustee. - -3. 
Services in the bay can access OpenStack services with the trustee
-   credentials and the trust.
-
-The roles which are delegated to the trustee should be limited. If the services
-in the bay only need access to Neutron, we should not allow the services to
-access other OpenStack services. But there is a limitation that a trustor
-must have the role which is delegated to a trustee [4]_.
-
-Magnum now only allows the user who created the bay to get the certificate, to
-avoid the security risk introduced by Docker [8]_. For example, if other users
-in the same tenant could get the certificate, they could use the Docker API to
-access the host file system of a bay node and get anything they want::
-
-    docker run --rm -v /:/hostroot ubuntu /bin/bash \
-        -c "cat /hostroot/etc/passwd"
-
-If Keystone doesn't allow new service accounts to be created when LDAP is used
-as the backend for Keystone, we can use a pre-created service account for all
-bays. In this situation, all the bays use the same service account but
-different trusts. We should add a config option to choose this method.
-
-Alternatives
-------------
-
-Magnum can create a user for each bay with roles to access OpenStack services
-in a dedicated domain. The method has one disadvantage: the user created by
-Magnum may get access to OpenStack services which the bay creator could not
-access before. For example, a user who cannot access Swift creates a bay, and
-Magnum creates a service account for this bay with roles to access Swift. If
-the user logs into the VMs and gets the credentials, the user can
-use these credentials to access Swift.
-
-Alternatively, Magnum doesn't prepare credentials, and the user who creates a
-bay needs to log into the nodes to manually add credentials to the config
-files for the services.
-
-Data model impact
------------------
-
-Trustee id, trustee password and trust id are added to the Bay table in the
-Magnum database.
-
-REST API impact
----------------
-
-Only the user who created a bay can get the certificate of this bay. Other
-users in the same tenant can no longer get the certificate.
-
-Security impact
----------------
-
-Trustee id and trustee password are encrypted in the Magnum database. When
-Magnum passes these parameters to Heat to create a stack, the transmission is
-encrypted by TLS, so we don't need to encrypt these credentials. These
-credentials are hidden in Heat; users can not query them in stack parameters.
-
-Trustee id, trustee password and trust id can be obtained in the VMs. Anyone
-who can log into the VMs can get them and use these credentials to access
-OpenStack services. In a production environment, these VMs must be secured
-properly to prevent unauthorized access.
-
-Only the user who created the bay can get the certificate to access the COE
-API, so it is not a security risk even if the COE API is not safe.
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-    humble00 (wanghua.humble@gmail.com)
-Other contributors:
-    None
-
-Work Items
-----------
-
-1. Create a trustee for each bay.
-2. Change the policy so that only the user who created a bay can get the
-   certificate of the bay.
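-
-As a rough sketch of work item 1 (the domain id, account names and delegated
-roles below are illustrative assumptions, not final implementation details),
-the conductor could drive Keystone with python-keystoneclient as follows: ::
-
-    from keystoneauth1.identity import v3
-    from keystoneauth1 import session
-    from keystoneclient.v3 import client
-
-    # Authenticate as the Magnum service user (placeholder credentials).
-    auth = v3.Password(auth_url='http://keystone:5000/v3',
-                       username='magnum', password='service-password',
-                       project_name='service',
-                       user_domain_id='default', project_domain_id='default')
-    ks = client.Client(session=session.Session(auth=auth))
-
-    # Step 1: a service account with no roles, created in a domain
-    # dedicated to trusts (domain id is a placeholder).
-    trustee = ks.users.create(name='bay-1234-trustee',
-                              domain='trust-domain-id',
-                              password='random-trustee-password')
-
-    # Step 2: delegate a limited set of roles via a trust. Keystone
-    # requires the trust to be created with the trustor's own token,
-    # so this call would use the bay creator's session in practice.
-    trust = ks.trusts.create(trustor_user='trustor-user-id',
-                             trustee_user=trustee.id,
-                             project='bay-project-id',
-                             role_names=['member'],
-                             impersonation=True)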
- -Dependencies -============ - -None - -Testing -======= - -Unit test and functional test for service accounts and the policy change. - -Documentation Impact -==================== - -The user guide and troubleshooting guide will be updated with details -regarding the service accounts. - -References -========== -.. [1] http://docs.openstack.org/developer/magnum/dev/kubernetes-load-balancer.html -.. [2] https://blueprints.launchpad.net/magnum/+spec/registryv2-in-master -.. [3] http://blogs.rdoproject.org/5858/role-delegation-in-keystone-trusts -.. [4] https://wiki.openstack.org/wiki/Keystone/Trusts -.. [5] https://github.com/kubernetes/kubernetes/blob/release-1.1/examples/mysql-cinder-pd/README.md -.. [6] https://bugs.launchpad.net/magnum/+bug/1503863 -.. [7] https://review.openstack.org/#/c/232152/ -.. [8] https://docs.docker.com/engine/articles/security/#docker-daemon-attack-surface - -History -======= - -None diff --git a/specs/flatten_attributes.rst b/specs/flatten_attributes.rst deleted file mode 100644 index 456a880f..00000000 --- a/specs/flatten_attributes.rst +++ /dev/null @@ -1,307 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================== -Flatten Cluster and ClusterTemplate Attributes -============================================== - -Launchpad blueprint: - -https://blueprints.launchpad.net/magnum/+spec/flatten-attributes - -Including all user-specified attributes in Clusters and ClusterTemplates will -increase flexibility for users during ClusterTemplate definition and Cluster -creation. - -Note that this spec only deals with changes to magnum's data model, not -API changes. Please see the NodeGroup spec for these details: - -https://blueprints.launchpad.net/magnum/+spec/nodegroups - - -Problem Description -=================== - -Clusters rely on attributes from both the magnum Cluster and ClusterTemplate -resources, but the line between attributes that belong in one or the other is -not well-defined. Most attributes make sense where they are, but there will be -times that users will want to capture different attributes in a ClusterTemplate -or specify them during cluster creation. The current system has little -flexibility, with only keypairs able to exist in either. - -Use Cases -========= - -1. Users that want to specify attributes in ClusterTemplates that they can't - right now, such as node count. - -2. Users that want to specify/override attributes when creating a Cluster that - they can't right now, since attributes that come from ClusterTemplates are - currently unchangeable. - - -Proposed Change -=============== - -Give both Cluster and ClusterTemplate a copy of all user-specifed attributes. - -The python object for ClusterTemplate will work much the same, just with more -attributes available. - -The python object for Cluster will no longer (and should not) need to use -attributes from its ClusterTemplate, since it will have all the attributes it -needs and it is possible that some attributes will have been overridden in the -cluster-create request. - -For example, `cluster.cluster_template.fixed_network` will become -`cluster.fixed_network`. - - -Alternatives -============ - -The shared fields can be added to the existing Cluster and ClusterTemplate -tables. This achieves the same effect, but brings with it the burden of -maintaining two sets of the same fields in different tables. 
- - -Data Model Impact -================= - -A new database table, ClusterAttributes, will be added. The shared fields will -be moved to this table. - -A foreign key to ClusterAttributes will be added to the Cluster and -ClusterTemplate tables. The relationship between Cluster and ClusterAttributes -is one-to-one. The same is true between ClusterTemplate and ClusterAttributes. -That is, Clusters and ClusterTemplates have their own separate copy of cluster -attributes. - -Database tables before, with fields that will be shared marked: - - cluster: - - =================== ======= - Attribute Shared? - ------------------- ------- - id - uuid - project_id - user_id - name - stack_id - status - status_reason - api_address - trust_id - trustee_username - trustee_user_id - trustee_password - coe_version - container_version - ca_cert_ref - magnum_cert_ref - cluster_template_id - node_addresses - master_addresses - create_timeout Yes - discovery_url Yes - node_count Yes - master_count Yes - keypair Yes - =================== ======= - - cluster_template: - - ===================== ======= - Attribute Shared? - --------------------- ------- - id - uuid - project_id - user_id - name - public - apiserver_port Yes - keypair_id Yes - labels Yes - external_network_id Yes - fixed_network Yes - fixed_subnet Yes - network_driver Yes - volume_driver Yes - dns_nameserver Yes - coe Yes - http_proxy Yes - https_proxy Yes - no_proxy Yes - registry_enabled Yes - tls_disabled Yes - insecure_registry Yes - master_lb_enabled Yes - floating_ip_enabled Yes - image_id Yes - flavor_id Yes - docker_volume_size Yes - docker_storage_driver Yes - cluster_distro Yes - server_type Yes - master_flavor_id Yes - ===================== ======= - -Database tables after: - - cluster: - - id - - uuid - - project_id - - user_id - - name - - stack_id - - status - - status_reason - - api_address - - trust_id - - trustee_username - - trustee_user_id - - trustee_password - - coe_version - - container_version - - ca_cert_ref - - magnum_cert_ref - - cluster_template_id - - node_addresses - - master_addresses - - FK to cluster_attributes (new) - - cluster_template: - - id - - uuid - - project_id - - user_id - - name - - public - - FK to cluster_attributes (new) - - cluster_attributes: - - id (new) - - apiserver_port - - create_timeout - - discovery_url - - node_count - - master_count - - keypair_id - - labels - - external_network_id - - fixed_network - - fixed_subnet - - network_driver - - volume_driver - - dns_nameserver - - coe - - http_proxy - - https_proxy - - no_proxy - - registry_enabled - - tls_disabled - - insecure_registry - - master_lb_enabled - - floating_ip_enabled - - image_id - - flavor_id - - docker_volume_size - - docker_storage_driver - - cluster_distro - - server_type - - master_flavor_id - - -REST API Impact -=============== - -None - -Security Impact -=============== - -None identified - - -Notifications Impact -==================== - -None - - -Other End-user Impact -===================== - -None - - -Performance Impact -================== - -Negligible. Two-table joins should have minimal performance impact. There may -be cases where only the Cluster/ClusterTemplate or ClusterAttributes table -needs to be queried/written that will further offset the small performance -impact or even improve performance since these operations will be dealing with -narrower tables. - - -Other Deployer Impact -===================== - -This change will require a database migration. 
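-
-A minimal alembic sketch of that migration is shown below (revision
-boilerplate omitted; the abbreviated column set and the foreign-key column
-names are assumptions, not the final schema): ::
-
-    from alembic import op
-    import sqlalchemy as sa
-
-
-    def upgrade():
-        op.create_table(
-            'cluster_attributes',
-            sa.Column('id', sa.Integer(), primary_key=True),
-            sa.Column('node_count', sa.Integer(), nullable=True),
-            sa.Column('master_count', sa.Integer(), nullable=True),
-            sa.Column('keypair_id', sa.String(255), nullable=True),
-            # ...the remaining shared columns from the tables above...
-        )
-        # One-to-one links from each owning table to its attribute row.
-        op.add_column('cluster', sa.Column(
-            'cluster_attributes_id', sa.Integer(),
-            sa.ForeignKey('cluster_attributes.id')))
-        op.add_column('cluster_template', sa.Column(
-            'cluster_attributes_id', sa.Integer(),
-            sa.ForeignKey('cluster_attributes.id')))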
- - -Developer Impact -================ - -Developers will not have to remember which attributes come from ClusterTemplate -because they will all be available in Cluster. - - -Implementation -============== - -Assignee(s) ------------ - -Spyros Trigazis (strigazi) - - -Work Items ----------- - -1. Database migration to add ClusterAttributes table. - -2. Updates to python code. - - -Dependencies -============ - -None - - -Testing -======= - -Unit tests will need to be updated, but functional tests will still pass as -this is an internal change. - - -Documentation Impact -==================== - -None for this spec, as the changes are internal. - - -References -========== - -None diff --git a/specs/magnum-horizon-plugin.rst b/specs/magnum-horizon-plugin.rst deleted file mode 100644 index 65bb3c2a..00000000 --- a/specs/magnum-horizon-plugin.rst +++ /dev/null @@ -1,171 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=================================== -Web Interface for Magnum in Horizon -=================================== - -Launchpad blueprint: - -https://blueprints.launchpad.net/magnum/+spec/magnum-horizon-plugin - -Currently there is no way for a user to interact with Magnum through a web -based user interface, as they are used to doing with other OpenStack -components. This implementation aims to introduce this interface as an -extension of Horizon (the OpenStack Dashboard) and expose all the features of -Magnum in a way familiar to users. - -Problem description -=================== - -In order to increase adoption and usability of Magnum we need to introduce a UI -component for users and administrators to interact with Magnum without the need -to use the command line. The UI proposed to be built will model all of the -features currently available in the Magnum REST API and built using the Horizon -plugin architecture to remain in line with other OpenStack UI projects and -minimise the amount of new code that needs to be added. - -Use Cases ----------- -1. An end user wanting to use Magnum with OpenStack who is not comfortable in - issuing commands with the python client will use the web user interface to - interact with Magnum. -2. An administrator may use the user interface to provide a quick overview of - what Magnum has deployed in their OpenStack environment. - -Proposed change -=============== - -The first step will be to extend the Horizon API to include CRUD operations -that are needed to interact with Magnum. Assuming that there are no issues here -and API changes/additions are not required to Magnum, we can begin to -design/implement the interface. We will aim to minimize the amount of Magnum -specific UI code that will need to be maintained by reusing components from -Horizon. This will also speed up the development significantly. - -It is suggested the initial implementation of Magnum UI will include basic CRUD -operations on BayModel and Bay resources. This will be the starting point for -development and upon completion this will represent version 1. - -Future direction includes adding CRUD operations for other Magnum features -(Pod, Container, Service, ReplicationController) and will be tracked by new -blueprints as they represent significant additional effort. The ultimate goal, -a user should be able to perform all normal interactions with Magnum through -the UI with no need for interaction with the python client. 
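-
-Anticipating the deployment step described under "Other deployer impact"
-below, enabling the plugin could amount to dropping a small settings file
-into Horizon, roughly like this sketch (the file name and module paths are
-hypothetical): ::
-
-    # openstack_dashboard/local/enabled/_50_magnum.py (hypothetical name)
-    PANEL = 'magnum'
-    PANEL_DASHBOARD = 'project'   # attach under the Project dashboard
-    PANEL_GROUP = 'default'
-    # Hypothetical module path inside the Magnum UI package.
-    ADD_PANEL = 'magnum_ui.content.magnum.panel.Magnum'
-    ADD_INSTALLED_APPS = ['magnum_ui']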
- -Suggestions for further improvement include visualising Magnum resources to -provide a quick overview of how resources are deployed. - -Bugs/Blueprints relating specifically to the Magnum UI will be tracked here: - -https://launchpad.net/magnum-ui - -Mockups/Designs will be shared using the OpenStack Invision account located -here: - -https://openstack.invisionapp.com - -Alternatives ------------- - -One alternative to this approach is to develop an entirely separate UI -specifically for Magnum. We will not use this approach as it does not fall in -line with how other projects are managing their user interfaces and this -approach would ultimately result in a significantly larger effort with much -duplication with Horizon. - -Data model impact ------------------ - -None - -REST API impact ---------------- - -For Magnum, none. The Horizon API will need to be extended to include Create, -Read, Update, Delete operations for all features available in the Magnum REST -API. However, this extension to the Horizon API will live in the Magnum UI tree -not the upstream Horizon tree. - -Security impact ---------------- - -None - -Notifications impact --------------------- - -None - -Other end user impact ---------------------- - -None - -Performance Impact ------------------- - -The Magnum API will be called from the user interface to return information to -the user about the current state of Magnum objects and perform new interactions -with Magnum. For every action a user performs from the user interface at least -one API call to Magnum will need to be made. - -Other deployer impact ---------------------- - -As the Magnum user interface will be managed and stored outside of the Horizon -project deployers will need to pull down the Magnum UI code and add this to -their Horizon install. - -In order to add the Magnum UI to Horizon the deployer will have to copy an -enable file to openstack_dashboard/local/enabled/ in their Horizon directory -and then run Horizon as they would normally. - -Developer impact ----------------- - -None - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - bradjones - -Work Items ----------- - -1. Extend Horizon API in include Magnum calls -2. CRUD operations on BayModel and Bay resources -3. CRUD operations on other Magnum features (Pod, Container, Service, etc.) -4. Refine the user experience - -Dependencies -============ - -None - -Testing -======= - -Each commit will be accompanied with unit tests. There will also be functional -tests which will be used as part of a cross-functional gate test for Magnum. -This additional gate test will be non-voting as failures will not indicate -issues with Magnum but instead serves as advanced warning of any changes that -could potentially break the UI. - -Documentation Impact -==================== - -An installation guide will be required. - -References -========== - -None diff --git a/specs/open-dcos.rst b/specs/open-dcos.rst deleted file mode 100644 index b450c0d7..00000000 --- a/specs/open-dcos.rst +++ /dev/null @@ -1,177 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================= -Magnum and Open DC/OS Integration -================================= - -Launchpad Blueprint: - -https://blueprints.launchpad.net/magnum/+spec/mesos-dcos - -Open DC/OS [1]_ is a distributed operating system based on the Apache Mesos -distributed systems kernel. 
It enables the management of multiple machines as -if they were a single computer. It automates resource management, schedules -process placement, facilitates inter-process communication, and simplifies -the installation and management of distributed services. Its included web -interface and available command-line interface (CLI) facilitate remote -management and monitoring of the cluster and its services. - -Open DC/OS now supports both docker containerizer and mesos containerizer. -The mesos containerizer support both docker and AppC image spec, the mesos -containerizer can manage docker containers well even if docker daemon is not -running. - -End user can install Open DC/OS with different ways, such as vagrant, cloud, -local etc. For cloud, the Open DC/OS only supports AWS now, end user can -deploy a DC/OS cluster quickly with a template. For local install, there -are many steps to install a Open DC/OS cluster. - -Problem Description -=================== - -COEs (Container Orchestration Engines) are the first class citizen in Magnum, -there are different COEs in Magnum now including Kubernetes, Swarm and Mesos. -All of those COEs are focusing docker container management, the problem is -that the concept of container is not only limited in docker container, but -also others, such as AppC, linux container etc, Open DC/OS is planning to -support different containers by leveraging Mesos unified container feature -and the Open DC/OS has a better management console for container orchestration. - -Currently, Magnum provides limited support for Mesos Bay as there is only one -framework named as Marathon running on top of Mesos. Compared with Open DC/OS, -the current Mesos Bay lack the following features: - -1. App Store for application management. The Open DC/OS has a universe to - provide app store functions. - -2. Different container technology support. The Open DC/OS support different - container technologies, such as docker, AppC etc, and may introduce OCI - support in future. Introducing Open DC/OS Bay can enable Magnum to support - more container technologies. - -3. Better external storage integration. The Open DC/OS is planning to introduce - docker volume isolator support in next release, the docker volume isolator - is leveraging docker volume driver API to integrate with 3rd party - distributed storage platforms, such as OpenStack Cinder, GlusterFS, Ceph - etc. - -4. Better network management. The Open DC/OS is planning to introduce CNI - network isolator in next release, the CNI network isolator is leveraging CNI - technologies to manage network for containers. - -5. Loosely coupled with docker daemon. The Open DC/OS can work well for docker - container even if docker daemon is not running. The docker daemon now have - some issues in large scale cluster, so this approach avoids the limitation - of the docker daemon but still can enable end user get some docker features - in large scale cluster. - - -Proposed Changes -================ - -We propose extending Magnum as follows. - -1. Leverage bay driver work and structure this new COE as a bay driver. - -2. Leverage mesos-slave-flags [3]_ to customize Open DC/OS. 
- - Here is an example of creating an Open DC/OS baymodel that uses - docker/volume as isolator, linux as launcher and docker as image - provider: :: - - magnum baymodel-create --name dcosbaymodel \ - --image-id dcos-centos-7.2 \ - --keypair-id testkey \ - --external-network-id 1hsdhs88sddds889 \ - --dns-nameserver 8.8.8.8 \ - --flavor-id m1.small \ - --docker-volume-size 5 \ - --coe dcos \ - --labels isolation=docker/volume,\ - launcher=linux, \ - image_providers=docker - - Magnum will validate the labels together with the driver specified before - creating the bay and will return an error if the validation fails. - - Magnum will continue to CRUD bays in the same way: - - magnum bay-create --name dcosbay --baymodel dcosbaymodel --node-count 1 - -3. Keep the old Mesos Bay and add a new Open DC/OS Bay. Once the Open DC/OS Bay - is stable, deprecate the Mesos Bay. - -4. Update unit and functional tests to support Open DC/OS Bay, it is also an - option to verify the Open DC/OS Bay in gate. - -5. Preserve the user experience by ensuring that any operation on Open DC/OS - Bay will be identical between a COE deployed by Magnum and a COE deployed - by other methods. - - -REST API Impact ---------------- - -There will be no REST API exposed from Magnum for end user to operate Open -DC/OS, end user can logon to Open DC/OS dashboard or call Open DC/OS REST -API directly to manage the containers or the applications. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - -- Guang Ya Liu (jay-lau-513) - -Other contributors: - -- Qun Wang (wangqun) -- Gao Jin Cao - - -Work Items ----------- - -1. Build VM image for Open DC/OS Bay. -2. Add Open DC/OS Bay driver. -3. Add Heat template for Open DC/OS Bay. -4. Add Open DC/OS Bay monitor. -5. Document how to use the Open DC/OS Bay. - -Dependencies -============ - -1. This blueprint will focus on running on Open DC/OS in CentOS 7.2. - -2. Depend on blueprint - -https://blueprints.launchpad.net/magnum/+spec/mesos-slave-flags - -Testing -======= - -Each commit will be accompanied with unit tests. There will also be -functional tests which will be used as part of a cross-functional gate -test for Magnum. - -Documentation Impact -==================== - -The Magnum Developer Quickstart document will be updated to support the Open -DC/OS Bay introduced by including a short example and a full documentation -with all the explanation for the labels in the user guide. Additionally, -background information on how to use the Open DC/OS Bay will be included. - -References -========== - -.. [1] https://dcos.io/docs/1.7/overview/what-is-dcos/ -.. [2] https://dcos.io/install/ -.. [3] https://blueprints.launchpad.net/magnum/+spec/mesos-slave-flags diff --git a/specs/resource-quotas.rst b/specs/resource-quotas.rst deleted file mode 100644 index 72c790c9..00000000 --- a/specs/resource-quotas.rst +++ /dev/null @@ -1,252 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================== -Quota for Magnum Resources -========================== - -Launchpad blueprint: - -https://blueprints.launchpad.net/magnum/+spec/resource-quota - -There are multiple ways to slice an OpenStack cloud. Imposing quota on these -various slices puts a limitation on the amount of resources that can be -consumed which helps to guarantee "fairness" or fair distribution of resource -at the creation time. 
If a particular project needs more resources, the
-concept of quota gives the ability to increase the resource count on demand,
-provided that the system constraints are not exceeded.
-
-
-Problem description
-===================
-At present Magnum does not have the concept of quota on Magnum resources. As
-a result, as long as the underlying Infrastructure as a Service (IaaS)
-layer has resources, any user can consume as many resources as they want, with
-the hard limit associated with the tenant/project being the upper bound for the
-resources to be consumed. Quotas are tied closely to physical resources and are
-billable entities, and hence from Magnum's perspective it makes sense to limit
-the creation and consumption of a particular kind of resource to a certain
-value.
-
-Use cases
----------
-Alice is the admin. She would like to have the feature which will give her
-details of Magnum resource consumption so that she can manage her resources
-appropriately.
-
-a. Ability to know current resource consumption.
-b. Ability to prohibit overuse by a project.
-c. Prevent the situation where users in one project get starved because users
-   in another project consume all the resources. Alice feels something like
-   "Quota Management" would help to guarantee "fairness".
-d. Prevent denial-of-service attacks, abuse, or errors by users where an
-   excessive amount of resources are created.
-
-Proposed change
-===============
-The proposed change is to introduce a Quota table which will primarily store
-the quota assigned to each resource in a project. For Mitaka, we will restrict
-the scope to Bays, which are Magnum resources. As a first step, we
-will start off by imposing quota on the number of bays to be created in a
-project. The change also plans to introduce REST APIs for GET/PUT/POST/DELETE.
-CLIs to get quota information for a particular project will also be provided.
-
-For Mitaka, we will restrict the scope to resources explicitly created and
-managed by Magnum. Specifically, for Mitaka we will focus on the number of
-Bays only. Going ahead we might add quota for containers, etc. The resources
-out of which a Bay is constructed are inherently not only Magnum resources
-but also involve resources from Nova, Cinder, Neutron, etc. Limiting the
-consumption of those resources is out of the scope of this spec and needs
-close collaboration with the quota management framework of the orchestration
-layer, since the orchestration layer can invoke the respective IaaS projects'
-APIs and get the consumption details before provisioning. As of now the
-orchestration layer used by Magnum, Heat, does not have the concept of quota,
-so we will start by imposing quota on the resources which Magnum manages,
-Bays, more specifically for Mitaka.
-
-When a project is created and the Magnum service is running, the default
-quota for Magnum resources will be set by the values configured in magnum.conf.
-Other OpenStack projects like Nova [2]_ and Cinder [3]_ follow a similar
-pattern, and we will do the same, hence we won't have a separate CLI for
-quota-create. Later, if the user wants to change the quota of a resource, an
-option will be provided to do so using magnum quota-update. In a situation
-where all of the quota for a specific Magnum resource (Bay) has been consumed
-and is in use, the admin will be allowed to set the quota to any value lower
-than the usage or hard limit, to prohibit users of the project from creating
-new Bays. This gives the admin more flexibility and better control
-on resource consumption. 
Until the resource is explicitly deleted,
-the quota consumed by the project for that resource won't be
-decreased. In short, quota-update support will take into consideration the
-new hard limit for a resource, specified by the admin, and will set the new
-value for this resource.
-
-Before the resource is created, Magnum will check the current count of the
-resource (Bays) created for a project. If the resource (Bay) count is less
-than the hard limit set for Bays, new Bay creation will be allowed. Since
-Bay creation is a long-running operation, special care will be taken while
-computing the available quota. For example, the 'in_progress' field in the
-Quota usages table will be updated when the resource (Bay) creation is
-initiated and is in progress. Let's say the quota hard limit is 5, three Bays
-have already been created, and two new requests come in to create new Bays.
-Since we have three Bays already created, the 'used' field will be set to 3.
-Now the 'in_progress' field will be set to 2 until the Bay creation succeeds.
-Once the Bay creation is done, this field will be reset to 0, and the 'used'
-count will be updated from 3 to 5. So at this moment, the hard limit is 5,
-used is 5 and in_progress is 0. If one more request comes in to create a
-new Bay, this request will be prohibited since there is not enough quota
-available.
-
-For Bays,
-
-available = hard_limit - [in_progress + used]
-
-In general,
-
-Resource quota available = Resource hard_limit - [
-(Resource creation in progress + Resources already created for project)]
-
-Alternatives
-------------
-At present there is no quota infrastructure in Magnum.
-
-Adding a quota management layer at the orchestration layer, Heat, could be an
-alternative. Doing so would give a finer view of resource consumption at the
-IaaS layer, which can be used while provisioning Magnum resources which
-depend on the IaaS layer [1]_.
-
-Data model impact
------------------
-New Quota and Quota usages tables will be introduced to the Magnum database to
-store quota consumption for each resource in a project.
-
-Quota table :
-
-+------------+--------------+------+-----+---------+----------------+
-| Field      | Type         | Null | Key | Default | Extra          |
-+------------+--------------+------+-----+---------+----------------+
-| id         | int(11)      | NO   | PRI | NULL    | auto_increment |
-| created_at | datetime     | YES  |     | NULL    |                |
-| updated_at | datetime     | YES  |     | NULL    |                |
-| project_id | varchar(255) | YES  | MUL | NULL    |                |
-| resource   | varchar(255) | NO   |     | NULL    |                |
-| hard_limit | int(11)      | YES  |     | NULL    |                |
-+------------+--------------+------+-----+---------+----------------+
-
-Quota usages table :
-
-+---------------+--------------+------+-----+---------+----------------+
-| Field         | Type         | Null | Key | Default | Extra          |
-+---------------+--------------+------+-----+---------+----------------+
-| created_at    | datetime     | YES  |     | NULL    |                |
-| updated_at    | datetime     | YES  |     | NULL    |                |
-| id            | int(11)      | NO   | PRI | NULL    | auto_increment |
-| project_id    | varchar(255) | YES  | MUL | NULL    |                |
-| resource      | varchar(255) | NO   |     | NULL    |                |
-| in_progress   | int(11)      | NO   |     | NULL    |                |
-| used          | int(11)      | NO   |     | NULL    |                |
-+---------------+--------------+------+-----+---------+----------------+
-
-
-REST API impact
----------------
-REST APIs will be added for :
-
-1. quota-defaults  Lists all default quotas for all tenants.
-2. quota-show      Lists the currently set quota values for a tenant.
-3. quota-update    Updates quotas for a tenant.
-4. 
quota-usage    Lists quota usage for a tenant.
-5. quota-list     Lists quotas for all the tenants.
-
-A user with the "admin" role will be able to do all the above operations, but
-a user with the "non-admin" role will be restricted to only get/list the quota
-associated with his/her tenant. A user with the "non-admin" role can be a
-Member of the tenant without the "admin" role.
-
-REST APIs for resources on which quota will be imposed will be enhanced :
-
-1. Bay create
-   Will check if there is quota available for Bay creation; if so, it will
-   proceed with the request, otherwise it will throw an exception that not
-   enough quota is available.
-
-Security impact
----------------
-None
-
-Notifications impact
---------------------
-None
-
-Other end user impact
----------------------
-End users will have the option to look at the quota set on resources and the
-quota usage of a particular project.
-
-Performance Impact
-------------------
-None
-
-Other deployer impact
----------------------
-None
-
-Developer impact
-----------------
-None
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-vilobhmm
-
-Other contributors:
-None
-
-Work Items
-----------
-
-1. Introduce Quota and Quota usages tables in the Magnum database.
-2. Introduce an API to set/update quota for a resource, specifically
-   bays, for the Mitaka release.
-3. Introduce an API to create a default quota entry for a resource.
-4. Provide config options that will allow users/admins to set quota.
-5. Make sure that if a resource is deleted, the used count in the
-   quota_usages table is decremented by the number of resources
-   deleted. For example, if a resource, bay, is deleted, then the entries
-   for it in the Quota usages table should be decremented by the
-   number of Bays deleted.
-6. Provide CLI options to view the quota details :
-   a. magnum quota-show
-   b. magnum quota-update
-   c. magnum quota-defaults
-   d. magnum quota-usage
-   e. magnum quota-list
-7. Add a conf setting for the default bay quota, since we will focus
-   on Bays for Mitaka.
-
-Dependencies
-============
-None
-
-Testing
-=======
-
-1. Each commit will be accompanied with unit tests.
-2. Gate functional tests will also be covered.
-
-Documentation Impact
-====================
-None
-
-References
-==========
-
-.. [1] http://lists.openstack.org/pipermail/openstack-dev/2015-December/082266.html
-.. [2] https://github.com/openstack/nova/blob/master/nova/quota.py
-.. [3] https://github.com/openstack/cinder/blob/master/cinder/quota.py
diff --git a/specs/stats-api-spec.rst b/specs/stats-api-spec.rst
deleted file mode 100644
index 8b441d09..00000000
--- a/specs/stats-api-spec.rst
+++ /dev/null
@@ -1,228 +0,0 @@
-========================
-Magnum Cluster Stats API
-========================
-
-Launchpad blueprint:
-
-https://blueprints.launchpad.net/magnum/+spec/magnum-stats-api
-
-This proposal is to add a new Magnum statistics API to provide useful metrics
-to OpenStack administrators/service providers as well as users.
-
-Problem Description
--------------------
-
-Currently there is no Magnum API to get usage metrics. This specification
-document proposes to add a new stats endpoint to the Magnum API. The proposed
-stats endpoint will provide useful metrics such as overall current usage info
-to OpenStack service providers, and non-admin tenants will also be able to
-fetch tenant-scoped statistics.
-
-
-Use Cases
----------
-
-Given below are some of the use cases that can be addressed by implementing
-a stats API for Magnum:
-
-1. 
A Magnum tenant with admin role would like to get the total number of - active clusters, nodes, floating IPs and Cinder volumes for all active - tenants. - -2. A Magnum tenant with admin role would like to get the total number of - active clusters, nodes, floating IPs and Cinder volumes for a specific - tenant. - -3. A Magnum tenant without admin role can get the total number of active - clusters, nodes, floating IPs and Cinder volumes scoped to that tenant. - -4. A Magnum tenant would like to discover the sum of allocated server capacity - for a given cluster (in terms of aggregate vcpu, memory, local storage, and - cinder volume storage). - -5. A Magnum tenant with admin role would like to discover the aggregate server - capacity (in terms of aggregate vcpu, memory, local storage, and cinder - volume storage) allocated by all clusters belonging to a specific tenant or - all the tenants. - -Please note that this is not an exhaustive list of use cases and additional -specs will be proposed based on the community needs. - - -Proposed Changes ----------------- - -The proposed change is to add a new '/stats' REST API endpoint to Magnum -service that will provide total number of clusters, nodes, floating IPs, -Cinder volumes and also a summary view of server capacity (in terms of -aggregate vcpu, memory, local storage, and cinder volume storage) allocated -to a cluster, or to all the clusters owned by the given tenant or all the -tenants. - -1. Add an API that returns total number of clusters, nodes, floating IPs, and - Cinder volumes of all tenants. - -2. Add an API that returns total number of clusters, nodes, floating IPs, and - Cinder volumes of a specific tenant. - -3. Add an API that returns aggregate vcpu, memory, local storage, and cinder - volume storage for the given cluster. - -4. Add an API that returns aggregate vcpu, memory, local storage, and cinder - volume storage allocated by all clusters belonging to a specific tenant. - -5. Update policy.json file to enable access to '/stats' endpoint to owner and - admin (using a policy rule admin_or_owner). - -In the initial implementation stats data will be aggregated from Magnum DB -and/or from other OpenStack services on demand. There will be some interaction -between the conductor and the drivers through an interface. If needed, this -on-demand stats aggregation implementation can be updated in future without -affecting the REST API behavior. For example, if the proposed on-demand data -aggregation is not responsive, Magnum conductor may need to collect the stats -periodically and save in the Magnum DB. - -Initial work in progress review [2]. - - -Alternatives ------------- - -Without proposed stats endpoint, an administrator could use OpenStack clients -to get some basic statistics such as server count, volume count etc. by -relying on the Magnum naming convention. For example, to get nova instance -count: -nova list | grep -e "kube-" -e "swarm-" -e "mesos-" | wc - -For the number of cinder volumes: -cinder list | grep "docker_volume" | wc -l - -For float IPs count: -openstack ip floating list -f value|wc -l - -For clusters count: -magnum cluster-list | grep "CREATE_COMPLETE" | wc -l - - -Data model impact ------------------ - -None, because data will be aggregated and summarized at the time of each stats -API request, so no stats need to be persisted in the data store. 
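-
-A minimal sketch of that on-demand aggregation follows (the helper name is
-hypothetical and the objects-layer calls are illustrative; floating IP and
-volume counts would similarly be fetched from Neutron and Cinder on
-demand): ::
-
-    from magnum import objects
-
-
-    def get_cluster_stats(context, project_id=None):
-        # Aggregate on demand from the Magnum DB; nothing is persisted.
-        filters = {'project_id': project_id} if project_id else {}
-        clusters = objects.Cluster.list(context, filters=filters)
-        return {
-            'clusters': len(clusters),
-            'nodes': sum((c.node_count or 0) + (c.master_count or 0)
-                         for c in clusters),
-        }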
- -REST API impact --------------- - -Add a new REST endpoint '/stats' as shown below: - -A GET request with admin role to '/stats?type=cluster' will return the total -clusters, nodes, floating IPs and Cinder volumes for all active tenants. - -A GET request without admin role to '/stats?type=cluster' will return the -total clusters, nodes, floating IPs and Cinder volumes for the current tenant. - -A GET request with admin role to '/stats?type=cluster&tenant=<tenant-id>' -will return the total clusters, nodes, floating IPs and Cinder volumes for -the given tenant. - -A GET request to '/stats?type=cluster&tenant=<tenant-id>' without admin role -will result in HTTP status code 403 (Forbidden) if the requester tenant-id -does not match the tenant-id provided in the URI. If it matches, stats will -be scoped to the requested tenant.
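To make these request shapes concrete, a hypothetical client interaction is
sketched below. The service URL and port, the token value, and the response
body are illustrative assumptions; the spec does not fix a response format::

    import requests

    MAGNUM_URL = 'http://controller:9511/v1'  # assumed service location
    HEADERS = {'X-Auth-Token': 'TOKEN'}       # Keystone token of the caller

    # Stats for all active tenants (admin) or for the caller's own
    # tenant (non-admin).
    r = requests.get(MAGNUM_URL + '/stats',
                     params={'type': 'cluster'}, headers=HEADERS)

    # Stats scoped to an explicit tenant; a non-admin caller gets 403
    # unless the tenant id matches its own.
    r = requests.get(MAGNUM_URL + '/stats',
                     params={'type': 'cluster', 'tenant': 'some-tenant-id'},
                     headers=HEADERS)
    if r.status_code == 403:
        print('Permission denied for this tenant')
    else:
        print(r.json())  # e.g. {'clusters': 2, 'nodes': 12, ...}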
- -Other Implementation Option --------------------------- - -The existing /cluster API could be updated to include stats info as shown -below: - -A 'GET' request with admin role to '/cluster/stats' will return total active -clusters and nodes across all the tenants. - -A 'GET' request to '/cluster/stats/<tenant-id>' will return total clusters -and nodes for the given tenant. - -A 'GET' request without admin role to '/cluster/stats/<tenant-id>' will -result in HTTP status code 403 (Forbidden). - -This option was discussed and rejected due to the fact that /cluster/stats -would collide with the /cluster/<cluster-ident> route. - - -Security impact --------------- - -There will be changes to the policy.json file that enable access to the -'/stats' endpoint for owner and admin (using a policy rule admin_or_owner). - -Notifications impact -------------------- - -None - -Other end user impact --------------------- - -The new /stats endpoint will be available to users. - -Performance impact ------------------ - -None - -Other deployer impact --------------------- - -None. - -Developer impact ---------------- - -None - -Implementation -------------- - -Assignee(s) ----------- - -Primary assignee - vijendar-komalla - -Work Items ---------- - -1. Implement /stats API in Magnum service. - -2. Document new API. - -3. Update Magnum CLI to expose stats functionality. - -Dependencies ------------ - -None - -Testing ------- - -1. Since a new stats endpoint will be introduced with this proposal, some - existing unit tests will need to be updated. - -2. Add unit tests and functional tests for the new functionality introduced. - -Documentation Impact -------------------- - -Update API documentation to include stats API information. - -References ---------- - -[1] - Magnum cluster statistics API blueprint: - -https://blueprints.launchpad.net/magnum/+spec/magnum-stats-api - -[2] - Proposed change under review: - -https://review.openstack.org/391301 diff --git a/specs/tls-support-magnum.rst b/specs/tls-support-magnum.rst deleted file mode 100644 index 87bc72b6..00000000 --- a/specs/tls-support-magnum.rst +++ /dev/null @@ -1,226 +0,0 @@ -===================== -TLS support in Magnum -===================== - -Launchpad blueprint: - -https://blueprints.launchpad.net/magnum/+spec/secure-kubernetes - -Currently there is no authentication in Magnum to provide access control to -limit communication between the Magnum service and the Kubernetes service so -that Kubernetes cannot be controlled by a third party. This implementation -closes this security loophole by using TLS as an access control mechanism. -Only the Magnum server will have the key to communicate with any given -Kubernetes API service under its control. An additional benefit of this -approach is that communication over the network will be encrypted, reducing -the chance of eavesdropping on the communication stream. - -Problem Description ------------------- - -Magnum currently controls Kubernetes API services using unauthenticated HTTP. -If an attacker knows the api_address of a Kubernetes Bay, they can control -the cluster without any access control. - -Use Cases --------- - -1. Operators expect system-level control to be protected by access control -that is consistent with industry best practices. Lack of this feature may -result in rejection of Magnum as an option for hosting containerized -workloads. - -Proposed Changes ---------------- - -The complete implementation of TLS support in Magnum can be decomposed into -the smaller implementations below. - -1. TLS support in Kubernetes Client Code. ----------------------------------------- - -The current implementation of the Kubernetes client code doesn't have any -authentication, so this implementation will change the client code to -provide authentication using TLS. - -Launchpad blueprint: - -https://blueprints.launchpad.net/magnum/+spec/tls-pythonk8sclient - -2. Generating certificates ---------------------------- - -This task mainly concerns how certificates for both the client -(magnum-conductor) and the server (kube-apiserver) will be generated, and -who will be the certificate authority (CA). - -These files can be generated in two ways: - -2.1. Magnum script ------------------- - -This implementation will use a standard tool to generate certificates and -keys. The script will be registered on the Kubernetes master node while -creating the bay. It will generate certificates, start the secure -kube-apiserver and then register the client certificates with Magnum. - -2.2. Using Barbican ------------------- - -Barbican can also be used as a CA using Dogtag. This implementation will use -Barbican to generate certificates. A sketch of what certificate generation -involves is given below.
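Purely as an illustration of what "generate certificates" involves, the
sketch below creates a private key and a self-signed certificate with the
Python cryptography library. The real implementation may rely on a different
standard tool or on Barbican/Dogtag; the names and validity period here are
assumptions, and a real CA would keep its key private and sign separate
client and server certificates instead of self-signing::

    import datetime

    from cryptography import x509
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes, serialization
    from cryptography.hazmat.primitives.asymmetric import rsa
    from cryptography.x509.oid import NameOID

    # Generate a 2048-bit RSA key and a certificate valid for one year.
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                   backend=default_backend())
    name = x509.Name(
        [x509.NameAttribute(NameOID.COMMON_NAME, u'magnum-client')])
    now = datetime.datetime.utcnow()
    cert = (x509.CertificateBuilder()
            .subject_name(name)
            .issuer_name(name)  # self-signed: subject == issuer
            .public_key(key.public_key())
            .serial_number(x509.random_serial_number())
            .not_valid_before(now)
            .not_valid_after(now + datetime.timedelta(days=365))
            .sign(key, hashes.SHA256(), default_backend()))

    # Write out PEM files for use by a TLS client.
    with open('client.crt', 'wb') as f:
        f.write(cert.public_bytes(serialization.Encoding.PEM))
    with open('client.key', 'wb') as f:
        f.write(key.private_bytes(
            serialization.Encoding.PEM,
            serialization.PrivateFormat.TraditionalOpenSSL,
            serialization.NoEncryption()))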
- -3. TLS Support in Magnum code ------------------------------- - -This work mainly involves deploying a secure bay and supporting the use of -certificates in Magnum to call the Kubernetes API. This implementation can be -decomposed into smaller tasks. - -3.1. Create secure bay ---------------------- - -This implementation will deploy a secure kube-apiserver running on the -Kubernetes master node. To do so, the following things need to be done: - -* Generate certificates -* Copy certificates -* Start a secure kube-apiserver - -3.1.1. Generate certificates ---------------------------- - -The certificates will be generated using either of the implementations -described in section 2. - -3.1.2. Copy certificates ------------------------ - -This depends on how the cert and key are generated; the implementation will -differ in each case. - -3.1.2.1. Using Magnum script ---------------------------- - -The script will generate both server and client certificates on the -Kubernetes master node. Hence only the client certificates need to be copied -to the Magnum host node. To copy these files, the script will make a call to -magnum-api to store the files. - -3.1.2.2. Using Barbican ----------------------- - -When using Barbican, the cert and key will be generated and stored in -Barbican itself. The magnum-conductor can either fetch the certificates from -Barbican and copy them to the Kubernetes master node, or they can be fetched -directly from the Kubernetes master node. - -3.1.3. Start a secure kube-apiserver ------------------------------------- - -The certificates generated above will be used to start a secure -kube-apiserver running on the Kubernetes master node. - -Now that we have a secure Kubernetes cluster running, any API call to -Kubernetes will be secure. - - -3.2. Support https ------------------- - -While invoking any Kubernetes resource-related APIs, magnum-conductor will -fetch the certificate from the Magnum database or Barbican and use it to make -a secure API call, as sketched below.
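A minimal sketch of such a call, assuming the client cert, key, and CA bundle
have already been fetched to local files; the paths and URL are illustrative::

    import requests

    # The client cert/key authenticate magnum-conductor to the
    # kube-apiserver; the CA bundle is used to verify the server.
    resp = requests.get('https://kube-master:6443/api/v1/nodes',
                        cert=('/tmp/client.crt', '/tmp/client.key'),
                        verify='/tmp/ca.crt')
    resp.raise_for_status()
    print(resp.json())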
- -4. Barbican support to store certificates securely ----------------------------------------------------- - -Barbican is a REST API designed for the secure storage, provisioning and -management of secrets. The client cert and key must be stored securely. This -implementation will support Barbican in Magnum to store the sensitive data. - -Data model impact ------------------ - -A new table 'cert' will be introduced to store the certificates. - -REST API impact ---------------- - -A new API /certs will be introduced to store the certificates. - -Security impact ---------------- - -With this support, Magnum will be secure enough to be used in an actual -production environment, since all communication with the Kubernetes master -node will be secure. -The certificates will be generated by Barbican or by a standard tool and -signed by trusted CAs. -The certificates will be stored safely in Barbican when the Barbican cert -storage option is selected by the administrator. - -Notifications impact --------------------- - -None - -Other end user impact ---------------------- - -None - -Performance impact ------------------- - -None - -Other deployer impact ---------------------- - -Deployer will need to install Barbican to store certificates. - -Developer impact ----------------- - -None - -Implementation -------------- - -Assignee(s) ------------ - -Primary assignee - madhuri(Madhuri Kumari) - yuanying(Motohiro Otsuka) - -Work Items ---------- - -1. TLS Support in Kubernetes Client code -2. Support for generating keys in Magnum -3. Support creating secure Kubernetes cluster -4. Support Barbican in Magnum to store certificates - -Dependencies ------------- - -Barbican (optional) - -Testing ------- - -Each commit will be accompanied with unit tests. There will also be -functional tests to test both good and bad certificates. - -Documentation Impact --------------------- - -Add a document explaining how the TLS cert and keys can be generated, and -update the guide to explain how to use the secure model of bays. - - -References ---------- - -None diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 14287b69..00000000 --- a/test-requirements.txt +++ /dev/null @@ -1,28 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -# Despite above warning added by global sync process, please use -# asciibetical order. - -bandit>=1.1.0 # Apache-2.0 -bashate>=0.2 # Apache-2.0 -coverage!=4.4,>=4.0 # Apache-2.0 -doc8 # Apache-2.0 -fixtures>=3.0.0 # Apache-2.0/BSD -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 -mock>=2.0 # BSD -openstackdocstheme>=1.11.0 # Apache-2.0 -oslotest>=1.10.0 # Apache-2.0 -osprofiler>=1.4.0 # Apache-2.0 -os-api-ref>=1.0.0 # Apache-2.0 -os-testr>=0.8.0 # Apache-2.0 -python-subunit>=0.0.18 # Apache-2.0/BSD -pytz>=2013.6 # MIT -sphinx>=1.6.2 # BSD -tempest>=16.1.0 # Apache-2.0 -testrepository>=0.0.18 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=1.4.0 # MIT -# releasenotes -reno!=2.3.1,>=1.8.0 # Apache-2.0 diff --git a/tools/cover.sh b/tools/cover.sh deleted file mode 100755 index 0abfa7b9..00000000 --- a/tools/cover.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -ALLOWED_EXTRA_MISSING=0 - -show_diff () { - head -1 $1 - diff -U 0 $1 $2 | sed 1,2d -} - -if ! git diff --exit-code || ! git diff --cached --exit-code -then - echo "There are uncommitted changes!" - echo "Please clean git working directory and try again" - exit 1 -fi - -# Check out the parent commit as the baseline and save its coverage report -git checkout HEAD^ - -# Count alembic migration ops; their lines are exempted from the coverage -# delta computed below. -base_op_count=`grep "op\." -R magnum/db/sqlalchemy/alembic/versions/ | wc -l` -baseline_report=$(mktemp -t magnum_coverageXXXXXXX) -find . -type f -name "*.pyc" -delete && python setup.py testr --coverage --testr-args="$*" -coverage report > $baseline_report -mv cover cover-master -cat $baseline_report -baseline_missing=$(awk 'END { print $3 }' $baseline_report) - -# Check out the proposed change again and save its coverage report -git checkout - - -current_op_count=`grep "op\." -R magnum/db/sqlalchemy/alembic/versions/ | wc -l` -current_report=$(mktemp -t magnum_coverageXXXXXXX) -find . -type f -name "*.pyc" -delete && python setup.py testr --coverage --testr-args="$*" -coverage report > $current_report -current_missing=$(awk 'END { print $3 }' $current_report) - -# Show coverage details -allowed_missing=$((baseline_missing+ALLOWED_EXTRA_MISSING+current_op_count-base_op_count)) - -echo "Allowed to introduce missing lines : ${ALLOWED_EXTRA_MISSING}" -echo "Missing lines in master : ${baseline_missing}" -echo "Missing lines in proposed change : ${current_missing}" - -if [ $allowed_missing -ge $current_missing ]; then - if [ $baseline_missing -lt $current_missing ]; then - show_diff $baseline_report $current_report - echo "We believe you can test your code with 100% coverage!" - else - echo "Thank you! You are awesome! Keep writing unit tests! :)"
:)" - fi - exit_code=0 -else - show_diff $baseline_report $current_report - echo "Please write more unit tests, we must maintain our test coverage :( " - exit_code=1 -fi - -rm $baseline_report $current_report -exit $exit_code diff --git a/tools/flake8wrap.sh b/tools/flake8wrap.sh deleted file mode 100755 index 919ea672..00000000 --- a/tools/flake8wrap.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -# -# A simple wrapper around flake8 which makes it possible -# to ask it to only verify files changed in the current -# git HEAD patch. -# -# Intended to be invoked via tox: -# -# tox -epep8 -- -HEAD -# - -if test "x$1" = "x-HEAD" ; then - shift - files=$(git diff --name-only HEAD~1 | tr '\n' ' ') - echo "Running flake8 on ${files}" - diff -u --from-file /dev/null ${files} | flake8 --max-complexity 10 --diff "$@" -else - echo "Running flake8 on all files" - exec flake8 --max-complexity 10 "$@" -fi diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh deleted file mode 100755 index 799ac184..00000000 --- a/tools/pretty_tox.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail - -TESTRARGS=$1 - -# --until-failure is not compatible with --subunit see: -# -# https://bugs.launchpad.net/testrepository/+bug/1411804 -# -# this work around exists until that is addressed -if [[ "$TESTARGS" =~ "until-failure" ]]; then - python setup.py testr --slowest --testr-args="$TESTRARGS" -else - python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f -fi diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 3ad304dc..00000000 --- a/tox.ini +++ /dev/null @@ -1,174 +0,0 @@ -[tox] -minversion = 1.6 -envlist = py35,py27,pep8 -skipsdist = True - -[testenv] -usedevelop = True -install_command = - pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} -U {opts} {packages} -whitelist_externals = bash - find - rm -setenv = - VIRTUAL_ENV={envdir} - PYTHONWARNINGS=default::DeprecationWarning -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -passenv = TEMPEST_* OS_TEST_* -commands = - find . -type f -name "*.py[c|o]" -delete - rm -f .testrepository/times.dbm - bash tools/pretty_tox.sh '{posargs}' - -[testenv:debug] -commands = oslo_debug_helper -t magnum/tests/unit {posargs} - -[testenv:debug-py27] -basepython = python2.7 -commands = oslo_debug_helper -t magnum/tests/unit {posargs} - -[testenv:debug-py35] -basepython = python3.5 -commands = oslo_debug_helper -t magnum/tests/unit {posargs} - -[testenv:functional-api] -sitepackages = True -setenv = {[testenv]setenv} - OS_TEST_PATH=./magnum/tests/functional/api - OS_TEST_TIMEOUT=7200 -deps = - {[testenv]deps} -commands = - find . -type f -name "*.py[c|o]" -delete - bash tools/pretty_tox.sh '{posargs}' - -[testenv:functional-k8s] -sitepackages = True -setenv = {[testenv]setenv} - OS_TEST_PATH=./magnum/tests/functional/k8s - OS_TEST_TIMEOUT=7200 -deps = - {[testenv]deps} -commands = - find . -type f -name "*.py[c|o]" -delete - bash tools/pretty_tox.sh '{posargs}' - -[testenv:functional-k8s-ironic] -sitepackages = True -setenv = {[testenv]setenv} - OS_TEST_PATH=./magnum/tests/functional/k8s_ironic - OS_TEST_TIMEOUT=7200 -deps = - {[testenv]deps} -commands = - find . 
-type f -name "*.py[c|o]" -delete - bash tools/pretty_tox.sh '{posargs}' - -[testenv:functional-k8s-coreos] -sitepackages = True -setenv = {[testenv]setenv} - OS_TEST_PATH=./magnum/tests/functional/k8s_coreos - OS_TEST_TIMEOUT=7200 -deps = - {[testenv]deps} -commands = - find . -type f -name "*.py[c|o]" -delete - bash tools/pretty_tox.sh '{posargs}' - -[testenv:functional-swarm] -sitepackages = True -setenv = {[testenv]setenv} - OS_TEST_PATH=./magnum/tests/functional/swarm - OS_TEST_TIMEOUT=7200 -deps = - {[testenv]deps} -commands = - find . -type f -name "*.py[c|o]" -delete - bash tools/pretty_tox.sh '{posargs}' - -[testenv:functional-swarm-mode] -sitepackages = True -setenv = {[testenv]setenv} - OS_TEST_PATH=./magnum/tests/functional/swarm_mode - OS_TEST_TIMEOUT=7200 -deps = - {[testenv]deps} -commands = - find . -type f -name "*.py[c|o]" -delete - bash tools/pretty_tox.sh '{posargs}' - -[testenv:functional-mesos] -sitepackages = True -setenv = {[testenv]setenv} - OS_TEST_PATH=./magnum/tests/functional/mesos - OS_TEST_TIMEOUT=7200 -deps = - {[testenv]deps} -commands = - find . -type f -name "*.py[c|o]" -delete - bash tools/pretty_tox.sh '{posargs}' - -[testenv:pep8] -commands = - doc8 -e .rst specs/ doc/source/ contrib/ CONTRIBUTING.rst HACKING.rst README.rst - bash tools/flake8wrap.sh {posargs} - bandit -r magnum -x tests -n5 -ll - bash -c "find {toxinidir} \ - -not \( -type d -name .?\* -prune \) \ - -not \( -type d -name doc -prune \) \ - -not \( -type d -name contrib -prune \) \ - -type f \ - -name \*.sh \ - -print0 | xargs -0 bashate -v -iE006,E010,E042 -eE005" - -[testenv:venv] -commands = {posargs} - -[testenv:bandit] -deps = -r{toxinidir}/test-requirements.txt -commands = bandit -r magnum -x tests -n5 -ll - -[testenv:cover] -commands = {toxinidir}/tools/cover.sh {posargs} - -[testenv:docs] -commands = - doc8 -e .rst specs/ doc/source/ contrib/ CONTRIBUTING.rst HACKING.rst README.rst - python setup.py build_sphinx - -[testenv:genconfig] -commands = - oslo-config-generator --config-file etc/magnum/magnum-config-generator.conf - -[flake8] -# H106 Don’t put vim configuration in source files -# H203 Use assertIs(Not)None to check for None -# H904 Delay string interpolations at logging calls -enable-extensions = H106,H203,H904 -exclude = .venv,.git,.tox,dist,*lib/python*,*egg,build,tools,releasenotes - -[hacking] -local-check-factory = magnum.hacking.checks.factory - -[testenv:pip-missing-reqs] -# do not install test-requirements as that will pollute the virtualenv for -# determining missing packages -# this also means that pip-missing-reqs must be installed separately, outside -# of the requirements.txt files -deps = pip_missing_reqs - -rrequirements.txt -commands=pip-missing-reqs -d --ignore-file=magnum/tests/* magnum - -[testenv:releasenotes] -commands = - rm -rf releasenotes/build - sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[testenv:install-guide] -commands = sphinx-build -a -E -W -d install-guide/build/doctrees -b html install-guide/source install-guide/build/html - -[testenv:api-ref] -commands = - rm -rf api-ref/build - sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html