diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index c151ee50..00000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = karbor -omit = karbor/tests/* - -[report] -ignore_errors = True diff --git a/.gitattributes b/.gitattributes deleted file mode 100644 index 8419f6b8..00000000 --- a/.gitattributes +++ /dev/null @@ -1,16 +0,0 @@ -*.py text eol=lf -*.txt text eol=lf -*.yaml text eol=lf -*.sh text eol=lf -*.conf text eol=lf -*.ini text eol=lf -*.cfg text eol=lf -*.json text eol=lf -*.pu text eol=lf -*.md text eol=lf -*.svg text eol=lf -*.inc text eol=lf - -*.png binary -*.jpg binary -*.jpeg binary diff --git a/.gitignore b/.gitignore deleted file mode 100644 index bf27aa52..00000000 --- a/.gitignore +++ /dev/null @@ -1,54 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg -*.egg-info -dist -build -.eggs -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage -.tox -nosetests.xml -.testrepository -.venv -.log -!.stestr.conf -# Translations -*.mo - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build -doc/source/contributor/api - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp -.*sw? - -releasenotes/build -etc/karbor.conf.sample diff --git a/.mailmap b/.mailmap deleted file mode 100644 index 516ae6fe..00000000 --- a/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -# Format is: -# -# diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index 13130736..00000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=${OS_TEST_PATH:-./karbor/tests/unit} -top_dir=./ diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index d450ca44..00000000 --- a/.testr.conf +++ /dev/null @@ -1,7 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./karbor/tests/unit} $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index cb0a9aef..00000000 --- a/.zuul.yaml +++ /dev/null @@ -1,62 +0,0 @@ -- project: - templates: - - openstack-cover-jobs - - openstack-lower-constraints-jobs - - check-requirements - - openstack-python3-victoria-jobs - - publish-openstack-docs-pti - check: - jobs: - - karbor-fullstack: - voting: false - -- job: - name: karbor-fullstack - parent: devstack-tox-functional - timeout: 7800 - required-projects: - - opendev.org/openstack/karbor - - opendev.org/openstack/python-karborclient - - opendev.org/openstack/trove - - opendev.org/openstack/trove-dashboard - - opendev.org/openstack/manila - - opendev.org/openstack/manila-tempest-plugin - vars: - devstack_plugins: - karbor: https://opendev.org/openstack/karbor - manila: https://opendev.org/openstack/manila - tox_envlist: fullstack - devstack_localrc: - API_WORKERS: 4 - VOLUME_BACKING_FILE_SIZE: 20490M - MANILA_OPTGROUP_generic1_driver_handles_share_servers: False - MANILA_USE_SERVICE_INSTANCE_PASSWORD: True - MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS: 'snapshot_support=True create_share_from_snapshot_support=True mount_snapshot_support=True driver_handles_share_servers=False' - SHARE_DRIVER: manila.share.drivers.lvm.LVMShareDriver - SWIFT_HASH: 66a3d6b56c1f479c8b4e70ab5c2000f5 - SWIFT_REPLICAS: 1 - SWIFT_LOOPBACK_DISK_SIZE: 10G - DATABASE_PASSWORD: password - 
RABBIT_PASSWORD: password - SERVICE_PASSWORD: password - SERVICE_TOKEN: password - ADMIN_PASSWORD: password - devstack_local_conf: - test-config: - $SWIFT_CONFIG_PROXY_SERVER: - DEFAULT: - workers: 4 - $CINDER_CONF: - DEFAULT: - osapi_volume_workers: 8 - rpc_response_timeout: 120 - $KARBOR_API_CONF: - DEFAULT: - max_window_time: 150 - min_window_time: 75 - min_interval: 300 - devstack_services: - tls-proxy: false - karbor-api: True - karbor-protection: True - karbor-operationengine: True diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 608272c6..00000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,15 +0,0 @@ -If you would like to contribute to the development of OpenStack, you must -follow the steps in this page: - - https://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack should be -submitted for review via the Gerrit tool, following the workflow documented at: - - https://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/karbor diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 60938c31..00000000 --- a/HACKING.rst +++ /dev/null @@ -1,4 +0,0 @@ -karbor Style Commandments -=============================================== - -Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a0..00000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README.rst b/README.rst index 6807e9f7..b7cee05b 100644 --- a/README.rst +++ b/README.rst @@ -1,81 +1,11 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. -.. image:: https://governance.openstack.org/tc/badges/karbor.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html - -.. Change things from this point on - -====== -Karbor -====== - -Application Data Protection as a Service for OpenStack - -.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/mascot/OpenStack_Project_Karbor_vertical.png - :alt: Karbor - :width: 300 - :height: 525 - :align: center +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -***************** -Mission Statement -***************** - -To protect the Data and Metadata that comprises an OpenStack-deployed -Application against loss/damage (e.g. backup, replication) by providing a -standard framework of APIs and services that allows vendors to provide plugins -through a unified interface - -Open Architecture -""""""""""""""""" - -Design for multiple perspectives: - -* User: Protect App Deployment - - * Configure and manage custom protection plans on the deployed resources - (topology, VMs, volumes, images, ...) 
- -* Admin: Define Protectable Resources - - * Decide what plugins protect which resources, what is available for the user - * Decide where users can protect their resources - -* Vendors: Standard API for protection products - - * Create plugins that implement Protection mechanisms for different OpenStack - resources - -***** -Links -***** - -* Free software: Apache license -* Wiki: https://wiki.openstack.org/wiki/Karbor -* Documentation: https://docs.openstack.org/karbor/latest/ -* Admin guide: https://docs.openstack.org/karbor/latest/admin/index.html -* Source: https://opendev.org/openstack/karbor -* Bugs: https://storyboard.openstack.org/#!/project/openstack/karbor -* Release notes: https://docs.openstack.org/karbor/latest/releasenotes.html - -******** -Features -******** - -Version 0.1 -""""""""""" - -* Resource API -* Plan API -* Bank API -* Ledger API -* Cross-resource dependencies - -Limitations -*********** - -* Only 1 Bank plugin per Protection Plan -* Automatic object discovery not supported +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py deleted file mode 100644 index 89cd3fcc..00000000 --- a/api-ref/source/conf.py +++ /dev/null @@ -1,208 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# karbor documentation build configuration file, created by -# sphinx-quickstart on Mon Sep 19 15:17:47 2016. -# -# This file is execfile()d with the current directory set to -# its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import sys - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -extensions = [ - 'openstackdocstheme', - 'os_api_ref' -] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Data Protection API Reference' -copyright = u'OpenStack Foundation' - -repository_name = 'openstack/karbor' -bug_project = 'karbor' -bug_tag = 'api-ref' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. 
-# -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# The reST default role (used for this markup: `text`) to use -# for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' - -html_theme = 'openstackdocs' -html_theme_options = { - "sidebar_mode": "toc", -} - - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 
-# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'karbordoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'karbor.tex', u'OpenStack Data Protection API Documentation', - u'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst deleted file mode 100644 index a77b77d9..00000000 --- a/api-ref/source/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -=================== -Data Protection API -=================== - -Contents: - -.. toctree:: - :maxdepth: 1 - - v1/index - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/api-ref/source/v1/index.rst b/api-ref/source/v1/index.rst deleted file mode 100644 index 67b921a7..00000000 --- a/api-ref/source/v1/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -:tocdepth: 2 - -====================== -Data Protection API V1 -====================== - -.. rest_expand_all:: - -.. include:: karbor-v1-protectables.inc -.. include:: karbor-v1-providers.inc -.. include:: karbor-v1-plans.inc -.. include:: karbor-v1-triggers.inc -.. include:: karbor-v1-scheduled-operations.inc -.. include:: karbor-v1-checkpoints.inc -.. include:: karbor-v1-restores.inc -.. include:: karbor-v1-operation-logs.inc -.. include:: karbor-v1-services.inc diff --git a/api-ref/source/v1/karbor-v1-checkpoints.inc b/api-ref/source/v1/karbor-v1-checkpoints.inc deleted file mode 100644 index 9ed5495b..00000000 --- a/api-ref/source/v1/karbor-v1-checkpoints.inc +++ /dev/null @@ -1,271 +0,0 @@ -.. -*- rst -*- - -=========== -Checkpoints -=========== - -This API enables the Karbor user to access and manage the checkpoints stored in -the protection provider: - -- List all checkpoints given a Bank ID. -- Show Information on a given checkpoint ID. -- Delete a checkpoint. -- Create a checkpoint. - -When you perform the above operation, these status values are possible: - -+-----------------+----------------------------------------------------------+ -| Status | Description | -+=================+==========================================================+ -| error | A checkpoint creation error occurred. | -+-----------------+----------------------------------------------------------+ -| protecting | The checkpoint is being created. | -+-----------------+----------------------------------------------------------+ -| available | The checkpoint is created, and available. | -+-----------------+----------------------------------------------------------+ -| deleting | The checkpoint is being deleted. | -+-----------------+----------------------------------------------------------+ -| deleted | The checkpoint is deleted. 
| -+-----------------+----------------------------------------------------------+ -| error-deleting | A checkpoint deletion error occurred. | -+-----------------+----------------------------------------------------------+ - - -List checkpoints -================ - -.. rest_method:: GET /v1/{tenant_id}/providers/{provider_id}/checkpoints - -List all the checkpoints offered at the given provider, or part of checkpoints -limited by ``?limit={limit_num}`` by ``GET`` method. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - provider_id: provider_id_1 - - sort: sort - - limit: limit - - marker: marker - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - checkpoints: checkpoint_list - - id: checkpoint_id - - project_id: tenant_id_1 - - status: checkpoint_status - - protection_plan: plan - - resource_graph: resource_graph - - checkpoints_links: links - -Response Example ----------------- - -.. literalinclude:: ./samples/checkpoints-list-response.json - :language: javascript - - -Create checkpoint -================= - -.. rest_method:: POST /v1/{tenant_id}/providers/{provider_id}/checkpoints - -Execute the protect operation for the specified plan and create a checkpoint -at a given provider. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - provider_id: provider_id_1 - - checkpoint: checkpoint - - plan_id: plan_id - - extra_info: extra_info - -Request Example ---------------- - -.. literalinclude:: ./samples/checkpoint-create-request.json - :language: javascript - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - checkpoint: checkpoint - - id: checkpoint_id - - project_id: tenant_id_1 - - status: checkpoint_status - - protection_plan: plan - - resource_graph: resource_graph - -Response Example ----------------- - -.. literalinclude:: ./samples/checkpoint-create-response.json - :language: javascript - - -Show checkpoint -=============== - -.. rest_method:: GET /v1/{tenant_id}/providers/{provider_id}/checkpoints/{checkpoint_id} - -Shows the information about the specified checkpoint offered at a given -provider. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - provider_id: provider_id_1 - - checkpoint_id: checkpoint_id_1 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - checkpoint: checkpoint - - id: checkpoint_id - - project_id: tenant_id_1 - - status: checkpoint_status - - protection_plan: plan - - resource_graph: resource_graph - -Response Example ----------------- - -.. literalinclude:: ./samples/checkpoint-create-response.json - :language: javascript - - -Delete checkpoint -================= - -.. rest_method:: DELETE /v1/{tenant_id}/providers/{provider_id}/checkpoints/{checkpoint_id} - -Deletes a specific checkpoint. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. 
rest_status_code:: error status.yaml - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - provider_id: provider_id_1 - - checkpoint_id: checkpoint_id_1 - -Response -------- - -Empty dict - -Response Example ---------------- - -.. literalinclude:: ./samples/checkpoint-delete-response.json - :language: javascript - - -Update checkpoint ================= - -.. rest_method:: PUT /v1/{tenant_id}/providers/{provider_id}/checkpoints/{checkpoint_id} - -Updates a specific checkpoint. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - provider_id: provider_id_1 - - checkpoint_id: checkpoint_id_1 - - os-resetState: os-resetState - - os-resetState.state: state - -Response -------- - -Empty dict - -Response Example ---------------- - -.. literalinclude:: ./samples/checkpoint-reset-state-response.json - :language: javascript \ No newline at end of file diff --git a/api-ref/source/v1/karbor-v1-operation-logs.inc b/api-ref/source/v1/karbor-v1-operation-logs.inc deleted file mode 100644 index cbec044d..00000000 --- a/api-ref/source/v1/karbor-v1-operation-logs.inc +++ /dev/null @@ -1,136 +0,0 @@ -.. -*- rst -*- - -============== -Operation logs -============== - -This API enables the Karbor user to get information about operation logs: - -- List all operation logs by a given project. -- Show the information of a given operation log. - -When you perform the above operation, these status values are possible: - -+-----------------+----------------------------------------------------------+ -| Status | Description | -+=================+==========================================================+ -| available | A protect operation is finished. | -+-----------------+----------------------------------------------------------+ -| success | A restore operation succeeded. | -+-----------------+----------------------------------------------------------+ -| deleted | A delete operation is finished. | -+-----------------+----------------------------------------------------------+ - - -List operation logs =================== - -.. rest_method:: GET /v1/{tenant_id}/operation_logs - -List all operation logs, triggered by a given project, or part of the operation -logs limited by ``?limit={limit_num}`` by ``GET`` method. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort: sort - - limit: limit - - marker: marker - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - operation_logs: operation_log_list - - id: operation_log_id - - project_id: tenant_id_1 - - operation_type: operation_type - - checkpoint_id: checkpoint_id_2 - - plan_id: plan_id_1 - - provider_id: provider_id_2 - - restore_id: restore_id_2 - - scheduled_operation_id: operation_id_2 - - status: operation_log_status - - started_at: started_at - - ended_at: ended_at - - error_info: error_info - - extra_info: extra_info - - operation_logs_links: links - -Response Example ----------------- - -.. literalinclude:: ./samples/operation-logs-list-response.json - :language: javascript - - -Show operation log ================== - -..
rest_method:: GET /v1/{tenant_id}/operation_logs/{operation_log_id} - -Show the information of a given operation log. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - operation_log_id: operation_log_id_1 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - operation_log: operation_log - - id: operation_log_id - - project_id: tenant_id_1 - - operation_type: operation_type_1 - - checkpoint_id: checkpoint_id_2 - - plan_id: plan_id_1 - - provider_id: provider_id_2 - - restore_id: restore_id_2 - - scheduled_operation_id: operation_id_2 - - status: operation_log_status - - started_at: started_at - - ended_at: ended_at - - error_info: error_info - - extra_info: extra_info_1 - -Response Example ----------------- - -.. literalinclude:: ./samples/operation-log-show-response.json - :language: javascript diff --git a/api-ref/source/v1/karbor-v1-plans.inc b/api-ref/source/v1/karbor-v1-plans.inc deleted file mode 100644 index a0e9e823..00000000 --- a/api-ref/source/v1/karbor-v1-plans.inc +++ /dev/null @@ -1,268 +0,0 @@ -.. -*- rst -*- - -===== -Plans -===== - -This API enables the Karbor user to access the protection Plan registry and do -the following operations: - -- Plan CRUD. -- List Plans. -- Starting and suspending of plans. - -When you perform the above operation, these status values are possible: - -+-----------------+----------------------------------------------------------+ -| Status | Description | -+=================+==========================================================+ -| started | A plan is updated. | -+-----------------+----------------------------------------------------------+ -| suspended | A plan is created. | -+-----------------+----------------------------------------------------------+ - - -List plans -========== - -.. rest_method:: GET /v1/{tenant_id}/plans - -List all the protection plans offered for the given project, or part of the -protection plans limited by ``?limit={limit_num}`` by ``GET`` method. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort: sort - - limit: limit - - marker: marker - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - plans: plan_list - - id: plan_id - - name: plan_name - - resources: resources - - status: plan_status - - provider_id: provider_id - - parameters: plan_parameters - - plans_links: links - -Response Example ----------------- - -.. literalinclude:: ./samples/plans-list-response.json - :language: javascript - - -Create plan -=========== - -.. rest_method:: POST /v1/{tenant_id}/plans - -Create a new plan. - -To specify the parameters for this plan, include the parameters -in the ``parameters`` attribute in the request body. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - plan: plan - - name: plan_name - - resources: resources - - provider_id: provider_id - - parameters: plan_parameters - -Request Example ---------------- - -.. 
literalinclude:: ./samples/plan-create-request.json - :language: javascript - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - plan: plan - - id: plan_id - - name: plan_name - - resources: resources - - status: plan_status - - provider_id: provider_id - - parameters: plan_parameters - -Response Example ----------------- - -.. literalinclude:: ./samples/plan-create-response.json - :language: javascript - - -Show plan -========= - -.. rest_method:: GET /v1/{tenant_id}/plans/{plan_id} - -Shows the information about a specific plan. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - plan_id: plan_id - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - plan: plan - - id: plan_id - - name: plan_name - - resources: resources - - status: plan_status - - provider_id: provider_id - - parameters: plan_parameters - -Response Example ----------------- - -.. literalinclude:: ./samples/plan-create-response.json - :language: javascript - - -Update plan -=========== - -.. rest_method:: PUT /v1/{tenant_id}/plans/{plan_id} - -Updates a specific plan. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - plan_id: plan_id - - plan: plan - - name: plan_name_1 - - resources: resources_1 - - status: plan_status_1 - -Request Example ---------------- - -.. literalinclude:: ./samples/plan-update-request.json - :language: javascript - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - plan: plan - - id: plan_id - - name: plan_name - - resources: resources - - status: plan_status - - provider_id: provider_id - - parameters: plan_parameters - -Response Example ----------------- - -.. literalinclude:: ./samples/plan-update-response.json - :language: javascript - - -Delete plan -=========== - -.. rest_method:: DELETE /v1/{tenant_id}/plans/{plan_id} - -Deletes a specific plan. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 202 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - plan_id: plan_id diff --git a/api-ref/source/v1/karbor-v1-protectables.inc b/api-ref/source/v1/karbor-v1-protectables.inc deleted file mode 100644 index 9ab02c42..00000000 --- a/api-ref/source/v1/karbor-v1-protectables.inc +++ /dev/null @@ -1,192 +0,0 @@ -.. -*- rst -*- - -============ -Protectables -============ - -Enables the Karbor user to access information about which resource types are -protectable (i.e. can be protected by Karbor). In addition, enables the user -to get additional information on each resource type, such as a list of actual -instances and their dependencies. - - -List protectable types -====================== - -.. rest_method:: GET /v1/{tenant_id}/protectables - -Lists all the available protectable types. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort: sort - - limit: limit - - marker: marker - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - protectable_type: protectable_type - -Response Example ----------------- - -.. literalinclude:: ./samples/protectables-list-response.json - :language: javascript - - -Show protectable type -===================== - -.. rest_method:: GET /v1/{tenant_id}/protectables/{protectable_type} - -Shows the information of a given protectable type. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - protectable_type: protectable_type_1 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - protectable_type: protectable_type_2 - - name: protectable_name - - dependent_types: dependent_types - -Response Example ----------------- - -.. literalinclude:: ./samples/protectable-show-response.json - :language: javascript - - -List protectable instances -========================== - -.. rest_method:: GET /v1/{tenant_id}/protectables/{protectable_type}/instances - -List all the available instances for the given protectable type. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - protectable_type: protectable_type_1 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - instances: protectable_instance_list - - id: protectable_instance_id - - type: protectable_type_3 - - name: protectable_instance_name - - dependent_resources: dependent_resources - - instances_links: links - -Response Example ----------------- - -.. literalinclude:: ./samples/protectable-instances-list-response.json - :language: javascript - - -Show protectable instance -========================= - -.. rest_method:: GET /v1/{tenant_id}/protectables/{protectable_type}/instances/{resource_id} - -Show the information about a specific instance and its immediate dependencies. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - protectable_type: protectable_type_1 - - resource_id: resource_id - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - instance: protectable_instance - - id: protectable_instance_id - - type: protectable_type_3 - - name: protectable_instance_name - - dependent_resources: dependent_resources - -Response Example ----------------- - -.. literalinclude:: ./samples/protectable-instance-show-response.json - :language: javascript diff --git a/api-ref/source/v1/karbor-v1-providers.inc b/api-ref/source/v1/karbor-v1-providers.inc deleted file mode 100644 index a7ccbd71..00000000 --- a/api-ref/source/v1/karbor-v1-providers.inc +++ /dev/null @@ -1,109 +0,0 @@ -.. -*- rst -*- - -========= -Providers -========= - -Enables the Karbor user to list available providers and get parameters and -result schema super-set for all plugins of a specific Provider. - - -List protection providers -========================= - -.. 
rest_method:: GET /v1/{tenant_id}/providers - -List all the information about the providers offered at a given service, or -part of the providers limited by ``?limit={limit_num}`` by ``GET`` method. -All providers need to be configured first by the admin. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort: sort - - limit: limit - - marker: marker - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - providers: provider_list - - id: provider_id - - name: provider_name - - description: description - - extended_info_schema: schema_extended_info - - saved_info_schema: schema_saved_info - - restore_schema: schema_restore - - providers_links: links - -Response Example ---------------- - -.. literalinclude:: ./samples/providers-list-response.json - :language: javascript - - -Show protection provider ======================== - -.. rest_method:: GET /v1/{tenant_id}/providers/{provider_id} - -Shows the information about a specific provider. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - provider_id: provider_id_1 - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - provider: provider - - id: provider_id - - name: provider_name - - description: description - - extended_info_schema: schema_extended_info - - saved_info_schema: schema_saved_info - - restore_schema: schema_restore - -Response Example ---------------- - -.. literalinclude:: ./samples/provider-show-response.json - :language: javascript diff --git a/api-ref/source/v1/karbor-v1-restores.inc b/api-ref/source/v1/karbor-v1-restores.inc deleted file mode 100644 index eb1b5ba5..00000000 --- a/api-ref/source/v1/karbor-v1-restores.inc +++ /dev/null @@ -1,193 +0,0 @@ -.. -*- rst -*- - -======== -Restores -======== - -This API enables the Karbor user to restore a checkpoint onto a restore -target: - -- List all restores by a given project. -- Create a restored system from a checkpoint. -- Show the information of a given restore operation. - -When you perform the above operation, these status values are possible: - -+-----------------+----------------------------------------------------------+ -| Status | Description | -+=================+==========================================================+ -| started | A restore operation is in progress. | -+-----------------+----------------------------------------------------------+ -| success | A restore operation succeeded. | -+-----------------+----------------------------------------------------------+ -| failed | A restore operation failed. | -+-----------------+----------------------------------------------------------+ - - -List restores ============= - -.. rest_method:: GET /v1/{tenant_id}/restores - -List all restores started, successful, or failed, triggered by a given project, -or part of the restores limited by ``?limit={limit_num}`` by ``GET`` -method. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request ------- - -..
rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort: sort - - limit: limit - - marker: marker - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - restores: restore_list - - id: restore_id - - project_id: tenant_id_1 - - provider_id: provider_id - - checkpoint_id: checkpoint_id - - restore_target: restore_target - - parameters: restore_parameters - - status: restore_status - - resource_status: restore_resource_status - - resource_reason: restore_resource_reason - - restores_links: links - -Response Example ----------------- - -.. literalinclude:: ./samples/restores-list-response.json - :language: javascript - - -Create restore -============== - -.. rest_method:: POST /v1/{tenant_id}/restores - -Start a restore operation. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - restore: restore - - provider_id: provider_id - - checkpoint_id: checkpoint_id - - restore_target: restore_target - - restore_auth: restore_auth - - parameters: restore_parameters - -Request Example ---------------- - -.. literalinclude:: ./samples/restore-create-request.json - :language: javascript - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - restore: restore - - id: restore_id - - project_id: tenant_id_1 - - provider_id: provider_id - - checkpoint_id: checkpoint_id - - restore_target: restore_target - - restore_auth: restore_auth - - parameters: restore_parameters - - status: restore_status - - resource_status: restore_resource_status - - resource_reason: restore_resource_reason - -Response Example ----------------- - -.. literalinclude:: ./samples/restore-create-response.json - :language: javascript - - -Show restore -============ - -.. rest_method:: GET /v1/{tenant_id}/restores/{restore_id} - -Show the information of a given restore operation. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - restore_id: restore_id_1 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - restore: restore - - id: restore_id - - project_id: tenant_id_1 - - provider_id: provider_id - - checkpoint_id: checkpoint_id - - restore_target: restore_target - - parameters: restore_parameters - - status: restore_status - - resource_status: restore_resource_status - - resource_reason: restore_resource_reason - -Response Example ----------------- - -.. literalinclude:: ./samples/restore-create-response.json - :language: javascript diff --git a/api-ref/source/v1/karbor-v1-scheduled-operations.inc b/api-ref/source/v1/karbor-v1-scheduled-operations.inc deleted file mode 100644 index 643d55b3..00000000 --- a/api-ref/source/v1/karbor-v1-scheduled-operations.inc +++ /dev/null @@ -1,204 +0,0 @@ -.. -*- rst -*- - -==================== -Scheduled operations -==================== - -This API enables the Karbor user to manage protection Operations: - -- Create a checkpoint for a given Protection Plan. -- Delete unneeded checkpoints from the provider. -- Status on a given Operation ID. - -Scheduled operations are operations that will be executed when a specific -trigger is triggered. 
- - -List scheduled operations -========================= - -.. rest_method:: GET /v1/{tenant_id}/scheduled_operations - -List all the scheduled operations based on trigger and plan, or part of the -scheduled operations limited by ``?limit={limit_num}`` by ``GET`` method. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort: sort - - limit: limit - - marker: marker - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - operations: operation_list - - scheduled_operation: operation - - id: operation_id - - name: operation_name - - description: description - - operation_type: operation_type - - trigger_id: trigger_id - - operation_definition: operation_definition - - enabled: enabled - - plans_links: links - -Response Example ----------------- - -.. literalinclude:: ./samples/scheduled-operations-list-response.json - :language: javascript - - -Create scheduled operation -========================== - -.. rest_method:: POST /v1/{tenant_id}/scheduled_operations - -Create a new scheduled operation. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - scheduled_operation: operation - - name: operation_name - - description: description - - operation_type: operation_type - - trigger_id: trigger_id - - operation_definition: operation_definition - -Request Example ---------------- - -.. literalinclude:: ./samples/scheduled-operation-create-request.json - :language: javascript - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - scheduled_operation: operation - - id: operation_id - - name: operation_name - - description: description - - operation_type: operation_type - - trigger_id: trigger_id - - operation_definition: operation_definition - - enabled: enabled - -Response Example ----------------- - -.. literalinclude:: ./samples/scheduled-operation-create-response.json - :language: javascript - - -Show scheduled operation -======================== - -.. rest_method:: GET /v1/{tenant_id}/scheduled_operations/{scheduled_operation_id} - -Shows the specified scheduled operation information. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - scheduled_operation_id: operation_id_1 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - scheduled_operation: operation - - id: operation_id - - name: operation_name - - description: description - - operation_type: operation_type - - trigger_id: trigger_id - - operation_definition: operation_definition - - enabled: enabled - -Response Example ----------------- - -.. literalinclude:: ./samples/scheduled-operation-create-response.json - :language: javascript - - -Delete scheduled operation -========================== - -.. rest_method:: DELETE /v1/{tenant_id}/scheduled_operations/{scheduled_operation_id} - -Deletes a specific scheduled operation. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. 
rest_status_code:: error status.yaml - - 202 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - scheduled_operation_id: operation_id_1 diff --git a/api-ref/source/v1/karbor-v1-services.inc b/api-ref/source/v1/karbor-v1-services.inc deleted file mode 100644 index b10f4637..00000000 --- a/api-ref/source/v1/karbor-v1-services.inc +++ /dev/null @@ -1,115 +0,0 @@ -.. -*- rst -*- - -======== -Services -======== -Administrator only. Lists all Karbor services, enables or disables a Karbor -service, freezes or thaws the specified karbor-operationengine host, and -fails over a replicating karbor-operationengine host. - - -List All Karbor Services ======================== - -.. rest_method:: GET /v1/{tenant_id}/os-services - -Lists all Karbor services. Provides details why any services were disabled. - - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - host: host_query - - binary: binary_query - -Response -------- - -.. rest_parameters:: parameters.yaml - - - services: services - - id: service_id - - binary: binary_required - - host: host_name_body_req - - status: service_status - - state: service_state_up_down - - updated_at: updated - - disabled_reason: disabled_reason_body_req - -Response Example ---------------- - -.. literalinclude:: ./samples/services-list-response.json - :language: javascript - - -Update a Karbor Service Status ============================== - -.. rest_method:: PUT /v1/{tenant_id}/os-services/{service_id} - -Update a Karbor operationengine service status. Specify the service by its id. - -Response codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - service_id: service_id_path - -Request Example --------------- - -.. literalinclude:: ./samples/services-update-request.json - :language: javascript - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - service: service_body - - id: service_id - - binary: binary_required - - host: host_name_body_req - - status: service_status - - state: service_state_up_down - - updated_at: updated - - disabled_reason: disabled_reason_body_req - -Response Example ---------------- - -.. literalinclude:: ./samples/services-update-response.json - :language: javascript diff --git a/api-ref/source/v1/karbor-v1-triggers.inc b/api-ref/source/v1/karbor-v1-triggers.inc deleted file mode 100644 index 2fc8b8e2..00000000 --- a/api-ref/source/v1/karbor-v1-triggers.inc +++ /dev/null @@ -1,245 +0,0 @@ -.. -*- rst -*- - -======== -Triggers -======== - -This API enables the Karbor user to access the trigger registry and do -the following operations: - -- Trigger CRUD. -- List Triggers. - - -List triggers ============= - -.. rest_method:: GET /v1/{tenant_id}/triggers - -List all the triggers offered for the given project, or part of the triggers -limited by ``?limit={limit_num}`` by ``GET`` method. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort: sort - - limit: limit - - marker: marker - -Response -------- - -..
rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - triggers: trigger_list - - id: trigger_id - - type: trigger_type - - name: trigger_name - - properties: trigger_properties - - triggers_links: links - -Response Example ---------------- - -.. literalinclude:: ./samples/triggers-list-response.json - :language: javascript - - -Create trigger -============== - -.. rest_method:: POST /v1/{tenant_id}/triggers - -Create a new trigger. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - trigger_info: trigger - - type: trigger_type - - name: trigger_name - - properties: trigger_properties - -Request Example --------------- - -.. literalinclude:: ./samples/trigger-create-request.json - :language: javascript - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - trigger_info: trigger - - id: trigger_id - - type: trigger_type - - name: trigger_name - - properties: trigger_properties - -Response Example ---------------- - -.. literalinclude:: ./samples/trigger-create-response.json - :language: javascript - - -Show trigger -============ - -.. rest_method:: GET /v1/{tenant_id}/triggers/{trigger_id} - -Shows the information about a specified trigger. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - trigger_id: trigger_id_1 - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - trigger_info: trigger - - id: trigger_id - - type: trigger_type - - name: trigger_name - - properties: trigger_properties - -Response Example ---------------- - -.. literalinclude:: ./samples/trigger-create-response.json - :language: javascript - - -Update trigger -============== - -.. rest_method:: PUT /v1/{tenant_id}/triggers/{trigger_id} - -Updates the name, the properties, or both of a specific trigger. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - trigger_id: trigger_id_1 - - trigger_info: trigger - - name: trigger_name_1 - - properties: trigger_properties_1 - -Request Example --------------- - -.. literalinclude:: ./samples/trigger-update-request.json - :language: javascript - -Response -------- - -.. rest_parameters:: parameters.yaml - - - X-Openstack-Request-Id: request_id - - trigger_info: trigger - - id: trigger_id - - type: trigger_type - - name: trigger_name - - properties: trigger_properties - -Response Example ---------------- - -.. literalinclude:: ./samples/trigger-update-response.json - :language: javascript - - -Delete trigger -============== - -.. rest_method:: DELETE /v1/{tenant_id}/triggers/{trigger_id} - -Deletes a specific trigger. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - -Request ------- - -.. 
rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - trigger_id: trigger_id_1 diff --git a/api-ref/source/v1/karbor-v1-verifications.inc b/api-ref/source/v1/karbor-v1-verifications.inc deleted file mode 100644 index a02b933a..00000000 --- a/api-ref/source/v1/karbor-v1-verifications.inc +++ /dev/null @@ -1,185 +0,0 @@ -.. -*- rst -*- - -============= -Verifications -============= - -This API enables the Karbor user to verify a specified checkpoint: - -- Create a verification for a given checkpoint. -- List all verifications of a given project. -- Show the information of a given verification. - -When you perform the above operations, these status values are possible: - -+-----------------+----------------------------------------------------------+ -| Status | Description | -+=================+==========================================================+ -| in_progress | A verify operation is in progress. | -+-----------------+----------------------------------------------------------+ -| success | The verify operation succeeded. | -+-----------------+----------------------------------------------------------+ -| fail | The verify operation failed. | -+-----------------+----------------------------------------------------------+ - - -List All Verifications -====================== - -.. rest_method:: GET /v1/{tenant_id}/verifications - -Lists all verifications offered for the given project. - - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort: sort - - limit: limit - - marker: marker - -Response -------- - -.. rest_parameters:: parameters.yaml - - - verifications: verifications - - id: verification_id - - project_id: tenant_id_1 - - provider_id: provider_id - - checkpoint_id: checkpoint_id - - parameters: verification_parameters - - status: verification_status - - resources_status: resources_status - - resources_reason: resources_reason - - verifications_links: verifications_links - - Response Example ---------------- - -.. literalinclude:: ./samples/verifications-list-response.json - :language: javascript - - -Create verification -=================== - -.. rest_method:: POST /v1/{tenant_id}/verifications - -Executes the verify operation for the specified provider and checkpoint. - -Response codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - verification: verification - - provider_id: provider_id - - checkpoint_id: checkpoint_id - - parameters: verification_parameters - -Request Example --------------- - -.. literalinclude:: ./samples/verification-create-request.json - :language: javascript - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - verification: verification - - id: verification_id - - project_id: tenant_id_1 - - provider_id: provider_id - - checkpoint_id: checkpoint_id - - parameters: verification_parameters - - status: verification_status - - resources_status: resources_status - - resources_reason: resources_reason - -Response Example ---------------- - -.. literalinclude:: ./samples/verification-create-response.json - :language: javascript - - -Show Verification -================= - -.. 
rest_method:: GET /v1/{tenant_id}/verifications/{verification_id} - -Shows the information about a given verification operation. - - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 404 - -Request ------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - verification_id: verification_id_path - -Response -------- - -.. rest_parameters:: parameters.yaml - - - verification: verification - - id: verification_id - - project_id: tenant_id_1 - - provider_id: provider_id - - checkpoint_id: checkpoint_id - - parameters: verification_parameters - - status: verification_status - - resources_status: resources_status - - resources_reason: resources_reason - - Response Example ---------------- - -.. literalinclude:: ./samples/verification-create-response.json - :language: javascript - diff --git a/api-ref/source/v1/parameters.yaml b/api-ref/source/v1/parameters.yaml deleted file mode 100644 index 6567d05c..00000000 --- a/api-ref/source/v1/parameters.yaml +++ /dev/null @@ -1,690 +0,0 @@ -# variables in header -request_id: - description: | - A unique ID for tracking the service request. The request ID associated - with the request appears in the service logs by default. - in: header - required: true - type: UUID - -# variables in path -checkpoint_id_1: - description: | - The UUID of a checkpoint. - in: path - required: true - type: UUID -operation_id_1: - description: | - The UUID of the scheduled operation. - in: path - required: true - type: UUID -operation_log_id_1: - description: | - The UUID of the operation log. - in: path - required: true - type: UUID -protectable_type_1: - description: | - The name of a specified protectable type. - in: path - required: true - type: string -provider_id_1: - description: | - The UUID of a provider. - in: path - required: true - type: UUID -resource_id: - description: | - The UUID of a resource. - in: path - required: true - type: UUID -restore_id_1: - description: | - The UUID of the restore. - in: path - required: true - type: UUID -service_id_path: - description: | - The ID of the service. - in: path - required: true - type: integer -tenant_id: - description: | - The UUID of the tenant in a multi-tenancy cloud. - in: path - required: true - type: string -trigger_id_1: - description: | - The UUID of the trigger. - in: path - required: true - type: UUID -verification_id_path: - description: | - The UUID of the verification. - in: path - required: true - type: UUID - - -# variables in query -binary_query: - description: | - Filter the service list result by the binary name of the service. - in: query - required: false - type: string -host_query: - description: | - Filter the service list result by the host name of the service. - in: query - required: false - type: string -limit: - description: | - Requests a page size of items. Returns a number - of items up to a limit value. Use the ``limit`` parameter to make - an initial limited request and use the ID of the last-seen item - from the response as the ``marker`` parameter value in a - subsequent limited request. - in: query - required: false - type: integer -marker: - description: | - The ID of the last-seen item. Use the ``limit`` - parameter to make an initial limited request and use the ID of the - last-seen item from the response as the ``marker`` parameter value - in a subsequent limited request. 
- in: query - required: false - type: string -sort: - description: | - Comma-separated list of sort keys and optional - sort directions in the form of < key > [: < direction > ]. A valid - direction is ``asc`` (ascending) or ``desc`` (descending). - in: query - required: false - type: string - - -# variables in body -binary_required: - description: | - The binary name of the service. - in: body - required: true - type: string -checkpoint: - description: | - A ``checkpoint`` object. - in: body - required: true - type: object -checkpoint_id: - description: | - The UUID of the checkpoint. - in: body - required: true - type: UUID -checkpoint_id_2: - description: | - The UUID of the checkpoint. - in: body - required: false - type: UUID -checkpoint_list: - description: | - The list of ``checkpoint`` objects. - in: body - required: true - type: array -checkpoint_status: - description: | - The status of the checkpoint. A valid value is ``error``, ``protecting``, - ``available``, ``deleting``, ``deleted`` or ``error-deleting``. - in: body - required: true - type: string -dependent_resources: - description: | - All dependent resources for a given protectable instance. It can be an - empty list. - in: body - required: true - type: array -dependent_types: - description: | - All dependent protectable types. It can be an empty list. - in: body - required: true - type: array -description: - description: | - The description for this object, resource, operation and so on. May be - ``NULL``. - in: body - required: true - type: string -disabled_reason_body_req: - description: | - The reason for disabling a service. - in: body - required: true - type: string -enabled: - description: | - "``1``" means ``Enable``, "``0``" means ``Disable``. - in: body - required: true - type: int -ended_at: - description: | - The ended time of the operation. - in: body - required: false - type: string -error_info: - description: | - The error info of the operation. - in: body - required: false - type: dict -extra_info: - description: | - The extra information for the checkpoint. - in: body - required: false - type: dict -extra_info_1: - description: | - The extra info of the operation. - in: body - required: false - type: dict -host_name_body_req: - description: | - The name of the host. - in: body - required: true - type: string -links: - description: | - Pagination links. - in: body - required: true - type: array -operation: - description: | - A ``scheduled operation`` object. - in: body - required: true - type: object -operation_definition: - description: | - The operation definition for a scheduled operation, including the - ``provider_id`` and ``plan_id`` keys. - in: body - required: true - type: dict -operation_id: - description: | - The UUID of the scheduled operation. - in: body - required: true - type: UUID -operation_id_2: - description: | - The UUID of the scheduled operation. - in: body - required: false - type: UUID -operation_list: - description: | - The list of ``scheduled operation`` objects. - in: body - required: true - type: array -operation_log: - description: | - An ``operation_log`` object. - in: body - required: true - type: object -operation_log_id: - description: | - The UUID of the operation_log. - in: body - required: true - type: UUID -operation_log_list: - description: | - The list of ``operation_log`` objects. - in: body - required: true - type: array -operation_log_status: - description: | - The status of operation logs. 
- in: body - required: false - type: string -operation_name: - description: | - The name of the scheduled operation. - in: body - required: true - type: string -operation_type: - description: | - The type of the scheduled operation. - in: body - required: true - type: string -operation_type_1: - description: | - The type of the operation actions. - in: body - required: true - type: string -os-resetState: - description: | - An ``os-resetState`` object. - in: body - required: true - type: object -plan: - description: | - A ``plan`` object. - in: body - required: true - type: object -plan_id: - description: | - The UUID of the plan. - in: body - required: true - type: UUID -plan_id_1: - description: | - The UUID of the plan. - in: body - required: false - type: UUID -plan_list: - description: | - The list of ``plan`` objects. - in: body - required: true - type: array -plan_name: - description: | - The name of the plan. - in: body - required: true - type: string -plan_name_1: - description: | - The name of the plan. - in: body - required: false - type: string -plan_parameters: - description: | - The specified parameters for the plan. - in: body - required: true - type: dict -plan_status: - description: | - The status of the plan. A valid value is ``started`` or ``suspended``. - in: body - required: true - type: string -plan_status_1: - description: | - The status of the plan. A valid value is ``started`` or ``suspended``. - in: body - required: false - type: string -protectable_instance: - description: | - A ``protectable_instance`` object. - in: body - required: true - type: object -protectable_instance_id: - description: | - The UUID of a protectable instance. - in: body - required: true - type: UUID -protectable_instance_list: - description: | - The list of ``protectable_instance`` objects. - in: body - required: true - type: array -protectable_instance_name: - description: | - The name of a protectable instance. - in: body - required: true - type: string -protectable_name: - description: | - The name of the protectable type. - in: body - required: true - type: string -protectable_type: - description: | - All the available protectable types. - in: body - required: true - type: array -protectable_type_2: - description: | - A ``protectable_type`` object. - in: body - required: true - type: object -protectable_type_3: - description: | - The name of a specified protectable type. - in: body - required: true - type: string -provider: - description: | - A ``provider`` object. - in: body - required: true - type: object -provider_id: - description: | - The UUID of the provider. - in: body - required: true - type: UUID -provider_id_2: - description: | - The UUID of the provider. - in: body - required: false - type: UUID -provider_list: - description: | - The list of ``provider`` objects. - in: body - required: true - type: array -provider_name: - description: | - The name of the provider. - in: body - required: true - type: string -resource_graph: - description: | - The resource graph of the protection plan corresponding to the checkpoint. - in: body - required: true - type: array -resources: - description: | - The list of all resources in the plan. - in: body - required: true - type: array -resources_1: - description: | - The list of all resources in the plan. - in: body - required: false - type: array -resources_reason: - description: | - Map of the verification reason for each resource. - in: body - required: true - type: dict -resources_status: - description: | - Map of the verification status for each resource. 
- in: body - required: true - type: dict -restore: - description: | - A ``restore`` object. - in: body - required: true - type: object -restore_auth: - description: | - The authentication for the restore, including the "``type``", - "``username``" and "``password``" keys. - in: body - required: true - type: dict -restore_id: - description: | - The UUID of the restore. - in: body - required: true - type: UUID -restore_id_2: - description: | - The UUID of the restore. - in: body - required: false - type: UUID -restore_list: - description: | - The list of ``restore`` objects. - in: body - required: true - type: array -restore_parameters: - description: | - Specifies parameters for each resource or resource type according to the - protection plugin's restore schema. - in: body - required: true - type: dict -restore_resource_reason: - description: | - Map of the reason for failure of each resource in the restore. - in: body - required: true - type: dict -restore_resource_status: - description: | - Map of the restore status for each resource. - in: body - required: true - type: dict -restore_status: - description: | - The status of the restore. A valid value is "``started``", "``success``" or - "``failed``". - in: body - required: true - type: string -restore_target: - description: | - The target of the restore operation. - in: body - required: true - type: string -schema_extended_info: - description: | - The extended info schema for provider. - in: body - required: true - type: dict -schema_restore: - description: | - The restore schema for provider. - in: body - required: true - type: dict -schema_saved_info: - description: | - The saved info schema for provider. - in: body - required: true - type: dict -service_body: - description: | - A ``service`` object. - in: body - required: true - type: object -service_id: - description: | - The ID of the service. - in: body - required: true - type: integer -service_state_up_down: - description: | - The state of the service. One of ``up`` or ``down``. - in: body - required: true - type: string -service_status: - description: | - The status of the service. One of ``enabled`` or ``disabled``. - in: body - required: true - type: string -services: - description: | - A list of service objects. - in: body - required: true - type: array -started_at: - description: | - The started time of the operation. - in: body - required: false - type: string -state: - description: | - The status of the checkpoint. A valid value is ``error`` or ``available``. - in: body - required: true - type: string -tenant_id_1: - description: | - The UUID of the tenant in a multi-tenancy cloud. - in: body - required: true - type: string -trigger: - description: | - A ``trigger`` object. - in: body - required: true - type: object -trigger_id: - description: | - The UUID of the trigger. - in: body - required: true - type: UUID -trigger_list: - description: | - The list of ``trigger`` objects. - in: body - required: true - type: array -trigger_name: - description: | - The name of the trigger. - in: body - required: true - type: string -trigger_name_1: - description: | - The name of the trigger. - in: body - required: false - type: string -trigger_properties: - description: | - The property list for the trigger. It must include "``pattern``" and - "``format``", and may include "``window``", "``start_time``" and "``end_time``". - in: body - required: true - type: dict -trigger_properties_1: - description: | - The property list for the trigger. 
It must include "``pattern``" and - "``format``", and may include "``window``", "``start_time``" and "``end_time``". - in: body - required: false - type: dict -trigger_type: - description: | - The type of the trigger. - in: body - required: true - type: string -updated: - description: | - The date and time stamp when the resource was - last updated. - in: body - required: true - type: string -verification: - description: | - A ``verification`` object. - in: body - required: true - type: object -verification_id: - description: | - The UUID of the verification. - in: body - required: true - type: UUID -verification_parameters: - description: | - The specified parameters for the verification. - in: body - required: true - type: dict -verification_status: - description: | - The status of the verification. A valid value is "``in_progress``", - "``success``" or "``fail``". - in: body - required: true - type: string -verifications: - description: | - A list of verification objects. - in: body - required: true - type: array -verifications_links: - description: | - Pagination links. - in: body - required: true - type: array diff --git a/api-ref/source/v1/samples/checkpoint-create-request.json b/api-ref/source/v1/samples/checkpoint-create-request.json deleted file mode 100644 index 61ebc3f9..00000000 --- a/api-ref/source/v1/samples/checkpoint-create-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "checkpoint": { - "plan_id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb", - "extra_info": { - "create-by": "operation-engine", - "trigger_id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba" - } - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/checkpoint-create-response.json b/api-ref/source/v1/samples/checkpoint-create-response.json deleted file mode 100644 index 378c17a4..00000000 --- a/api-ref/source/v1/samples/checkpoint-create-response.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "checkpoint": { - "id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "project_id": "e486a2f49695423ca9c47e589b948108", - "status": "available", - "protection_plan": { - "id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb", - "name": "My 3 tier application", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume" - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume" - } - ] - }, - "resource_graph": "[{'0x3': ['OS::Cinder::Volume', '33b6bb0b-1157-4e66-8553-1c9e14b1c7ba', 'Data volume'], '0x2': ['OS::Cinder::Volume', '25336116-f38e-4c22-81ad-e9b7bd71ba51', 'System volume'], '0x1': ['OS::Nova::Server', 'cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01', 'App server'], '0x0': ['OS::Glance::Image', '99777fdd-8a5b-45ab-ba2c-52420008103f', 'cirros-0.3.4-x86_64-uec']}, [['0x1', ['0x0']]]]" - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/checkpoint-delete-response.json b/api-ref/source/v1/samples/checkpoint-delete-response.json deleted file mode 100644 index 9e26dfee..00000000 --- a/api-ref/source/v1/samples/checkpoint-delete-response.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/api-ref/source/v1/samples/checkpoint-reset-state-response.json b/api-ref/source/v1/samples/checkpoint-reset-state-response.json 
deleted file mode 100644 index 9e26dfee..00000000 --- a/api-ref/source/v1/samples/checkpoint-reset-state-response.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/api-ref/source/v1/samples/checkpoints-list-response.json b/api-ref/source/v1/samples/checkpoints-list-response.json deleted file mode 100644 index 5644582c..00000000 --- a/api-ref/source/v1/samples/checkpoints-list-response.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "checkpoints": [ - { - "id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "project_id": "e486a2f49695423ca9c47e589b948108", - "status": "available", - "protection_plan": { - "id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb", - "name": "My 3 tier application", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume" - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume" - } - ] - }, - "resource_graph": "[{'0x3': ['OS::Cinder::Volume', '33b6bb0b-1157-4e66-8553-1c9e14b1c7ba', 'Data volume'], '0x2': ['OS::Cinder::Volume', '25336116-f38e-4c22-81ad-e9b7bd71ba51', 'System volume'], '0x1': ['OS::Nova::Server', 'cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01', 'App server'], '0x0': ['OS::Glance::Image', '99777fdd-8a5b-45ab-ba2c-52420008103f', 'cirros-0.3.4-x86_64-uec']}, [['0x1', ['0x0']]]]" - } - ], - "checkpoints_links": [ - { - "href": "/v1/{project_id}/checkpoints?limit={limit_num}&marker=dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "rel": "next" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/operation-log-show-response.json b/api-ref/source/v1/samples/operation-log-show-response.json deleted file mode 100644 index 0cbb100d..00000000 --- a/api-ref/source/v1/samples/operation-log-show-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "operation_log": { - "status": "available", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": null, - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T08:43:22.000000", - "started_at": "2017-07-28T08:42:02.000000", - "id": "7a16c731-0658-47dd-aa3b-98ee21830e23", - "extra_info": null, - "plan_id": "d58ffd3e-f64e-4b67-9bb0-b86d3483e7d0", - "scheduled_operation_id": null, - "operation_type": "protect" - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/operation-logs-list-response.json b/api-ref/source/v1/samples/operation-logs-list-response.json deleted file mode 100644 index 034ec2fe..00000000 --- a/api-ref/source/v1/samples/operation-logs-list-response.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "operation_logs": [{ - "status": "deleted", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": null, - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T09:02:57.000000", - "started_at": "2017-07-28T09:02:41.000000", - "id": "f0aa664b-f385-4618-bc27-9e0116cceea7", - "extra_info": null, - "plan_id": "d58ffd3e-f64e-4b67-9bb0-b86d3483e7d0", - "scheduled_operation_id": null, - "operation_type": "protect" - }, - { - "status": "success", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": 
"7c0d396a-981b-4953-95f5-30382ddaa8bf", - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T08:58:08.000000", - "started_at": "2017-07-28T08:57:36.000000", - "id": "8736649d-857e-4637-923c-3bdb35edd74e", - "extra_info": null, - "plan_id": null, - "scheduled_operation_id": null, - "operation_type": "restore" - }, - { - "status": "available", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": null, - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T08:43:22.000000", - "started_at": "2017-07-28T08:42:02.000000", - "id": "7a16c731-0658-47dd-aa3b-98ee21830e23", - "extra_info": null, - "plan_id": "d58ffd3e-f64e-4b67-9bb0-b86d3483e7d0", - "scheduled_operation_id": null, - "operation_type": "protect" - }], - "operation_logs_links": [{ - "href": "/v1/{project_id}/operation_logs?limit={limit_num}&marker=7a16c731-0658-47dd-aa3b-98ee21830e23", - "rel": "next" - }] -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/plan-create-request.json b/api-ref/source/v1/samples/plan-create-request.json deleted file mode 100644 index b01fa68a..00000000 --- a/api-ref/source/v1/samples/plan-create-request.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "plan": { - "name": "My 3 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/plan-create-response.json b/api-ref/source/v1/samples/plan-create-response.json deleted file mode 100644 index 9183be0a..00000000 --- a/api-ref/source/v1/samples/plan-create-response.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "plan": { - "id": "9e5475d2-6425-4986-9136-a4f09642297f", - "name": "My 3 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "status": "suspended", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" 
- }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/plan-update-request.json b/api-ref/source/v1/samples/plan-update-request.json deleted file mode 100644 index f62c3bda..00000000 --- a/api-ref/source/v1/samples/plan-update-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "plan":{ - "status": "started", - "name": "My 1 tier application" - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/plan-update-response.json b/api-ref/source/v1/samples/plan-update-response.json deleted file mode 100644 index 06f90bb8..00000000 --- a/api-ref/source/v1/samples/plan-update-response.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "plan": { - "id": "9e5475d2-6425-4986-9136-a4f09642297f", - "name": "My 1 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "status": "started", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/plans-list-response.json b/api-ref/source/v1/samples/plans-list-response.json deleted file mode 100644 index 7ea57544..00000000 --- a/api-ref/source/v1/samples/plans-list-response.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "plans": [ - { - "id": "9e5475d2-6425-4986-9136-a4f09642297f", - "name": "My 3 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "status": "suspended", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } - ], - "plans_links": [ - { - "href": "/v1/{project_id}/plans?limit={limit_num}&marker=9e5475d2-6425-4986-9136-a4f09642297f", - "rel": "next" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/protectable-instance-show-response.json b/api-ref/source/v1/samples/protectable-instance-show-response.json deleted file mode 
100644 index 30ebed16..00000000 --- a/api-ref/source/v1/samples/protectable-instance-show-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "instance": { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "My VM", - "dependent_resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - } - ] - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/protectable-instances-list-response.json b/api-ref/source/v1/samples/protectable-instances-list-response.json deleted file mode 100644 index 5ac7e7b2..00000000 --- a/api-ref/source/v1/samples/protectable-instances-list-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "instances":[ - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } -], - "instances_links": [ - { - "href": "/v1/{project_id}/instances?limit=1&marker=cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "rel": "next" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/protectable-show-response.json b/api-ref/source/v1/samples/protectable-show-response.json deleted file mode 100644 index 0bce7e07..00000000 --- a/api-ref/source/v1/samples/protectable-show-response.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "protectable_type": { - "name": "OS::Nova::Server", - "dependent_types": [ - "OS::Cinder::Volume", - "OS::Glance::Image" - ] - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/protectables-list-response.json b/api-ref/source/v1/samples/protectables-list-response.json deleted file mode 100644 index a96e51b1..00000000 --- a/api-ref/source/v1/samples/protectables-list-response.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "protectable_type": [ - "OS::Keystone::Project", - "OS::Cinder::Volume", - "OS::Glance::Image", - "OS::Nova::Server" - ] -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/provider-show-response.json b/api-ref/source/v1/samples/provider-show-response.json deleted file mode 100644 index b51a97b0..00000000 --- a/api-ref/source/v1/samples/provider-show-response.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "provider": { - "id": "2220f8b1-975d-4621-a872-fa9afb43cb6c", - "name": "OS Infra Provider", - "description": "This provider uses OpenStack's own services (swift, cinder) as storage", - "extended_info_schema": { - "options_schema": { - "OS::Cinder::Volume": { - "required": [ - "backup_mode" - ], - "type": "object", - "properties": { - "backup_mode": { - "default": "auto", - "enum": [ - "full", - "incremental", - "auto" - ], - "type": "string", - "description": "The backup mode.", - "title": "Backup Mode" - } - }, - "title": "Cinder Protection Options" - } - }, - "saved_info_schema": { - "OS::Cinder::Volume": { - "required": [ - "name" - ], - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name for this backup.", - "title": "Name" - } - }, - "title": "Cinder Protection Saved Info" - } - }, - "restore_schema": { - "OS::Cinder::Volume": { - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "description": "The name of the restored volume.", - "title": "Restore Name" - } - }, - "title": "Cinder Protection Restore" - } - } - } - } -} \ No newline at end of file 
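The JSON samples above pair naturally with a concrete client call. The following is a minimal sketch, in Python with ``requests``, of creating the time trigger documented by ``POST /v1/{tenant_id}/triggers`` and shown in ``samples/trigger-create-request.json``; the endpoint URL, project ID, and token are hypothetical placeholders rather than values defined in this repository:

.. code-block:: python

    # Minimal sketch: create a time trigger through the Karbor v1 API.
    # KARBOR_ENDPOINT, TENANT_ID, and TOKEN are assumed placeholders; real
    # values would come from the Keystone service catalog and an auth plugin.
    import requests

    KARBOR_ENDPOINT = "http://controller:8799/v1"   # hypothetical endpoint
    TENANT_ID = "e486a2f49695423ca9c47e589b948108"  # example project ID
    TOKEN = "..."                                   # a valid Keystone token

    # Body mirrors samples/trigger-create-request.json.
    body = {
        "trigger_info": {
            "name": "My backup trigger",
            "type": "time",
            "properties": {
                "format": "calendar",
                "pattern": "BEGIN:VEVENT\\nRRULE:FREQ=HOURLY;INTERVAL=1;\\nEND:VEVENT",
                "start_time": "2015-12-17T08:30:00",
                "end_time": "2016-03-17T08:30:00",
                "window": "3600",
            },
        }
    }

    resp = requests.post(
        "%s/%s/triggers" % (KARBOR_ENDPOINT, TENANT_ID),
        json=body,
        headers={"X-Auth-Token": TOKEN},
    )
    resp.raise_for_status()
    # The response body carries the new trigger's generated UUID.
    print(resp.json()["trigger_info"]["id"])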
diff --git a/api-ref/source/v1/samples/providers-list-response.json b/api-ref/source/v1/samples/providers-list-response.json deleted file mode 100644 index 45f5e60b..00000000 --- a/api-ref/source/v1/samples/providers-list-response.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "providers": [ - { - "id": "2220f8b1-975d-4621-a872-fa9afb43cb6c", - "name": "OS Infra Provider", - "description": "This provider uses OpenStack's own services (swift, cinder) as storage", - "extended_info_schema": { - "options_schema": { - "OS::Cinder::Volume": { - "required": [ - "backup_mode" - ], - "type": "object", - "properties": { - "backup_mode": { - "default": "auto", - "enum": [ - "full", - "incremental", - "auto" - ], - "type": "string", - "description": "The backup mode.", - "title": "Backup Mode" - } - }, - "title": "Cinder Protection Options" - } - }, - "saved_info_schema": { - "OS::Cinder::Volume": { - "required": [ - "name" - ], - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name for this backup.", - "title": "Name" - } - }, - "title": "Cinder Protection Saved Info" - } - }, - "restore_schema": { - "OS::Cinder::Volume": { - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "description": "The name of the restored volume.", - "title": "Restore Name" - } - }, - "title": "Cinder Protection Restore" - } - } - } - } - ], - "providers_links": [ - { - "href": "/v1/{project_id}/providers?limit={limit_num}&marker=cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "rel": "next" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/restore-create-request.json b/api-ref/source/v1/samples/restore-create-request.json deleted file mode 100644 index 7eeab166..00000000 --- a/api-ref/source/v1/samples/restore-create-request.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "restore": { - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "restore_target": "http://192.168.1.2/identity/", - "restore_auth": { - "type": "password", - "username": "admin", - "password": "secretadmin" - }, - "parameters": { - "OS::Cinder::Volume": { - }, - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": { - } - } - } -} diff --git a/api-ref/source/v1/samples/restore-create-response.json b/api-ref/source/v1/samples/restore-create-response.json deleted file mode 100644 index 797fbd8e..00000000 --- a/api-ref/source/v1/samples/restore-create-response.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "restore": { - "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "project_id": "e486a2f49695423ca9c47e589b948108", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "restore_target": "http://192.168.1.2/identity/", - "restore_auth": { - "type": "password", - "username": "admin", - "password": "***" - }, - "parameters": { - "OS::Cinder::Volume": { - }, - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": { - } - }, - "resource_status": { - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": "restoring", - "OS::Cinder::Volume#98eb847f-9f59-4d54-8b7b-5047bd2fa4c7": "restoring" - }, - "resource_reason": { - }, - "status": "success" - } -} diff --git a/api-ref/source/v1/samples/restores-list-response.json b/api-ref/source/v1/samples/restores-list-response.json deleted file mode 100644 index ee5e0b2f..00000000 --- a/api-ref/source/v1/samples/restores-list-response.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "restores": [ - { - "id": 
"22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "project_id": "e486a2f49695423ca9c47e589b948108", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "restore_target": "http://192.168.1.2/identity/", - "parameters": { - "OS::Cinder::Volume": { - }, - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": { - } - }, - "restore_auth": { - "type": "password", - "username": "admin", - "password": "***" - }, - "resource_status": { - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": "restoring", - "OS::Cinder::Volume#98eb847f-9f59-4d54-8b7b-5047bd2fa4c7": "error" - }, - "resource_reason": { - "OS::Cinder::Volume#98eb847f-9f59-4d54-8b7b-5047bd2fa4c7": "Backup not found" - }, - "status": "success" - } - ], - "restores_links": [ - { - "href": "/v1/{project_id}/restores?limit={limit_num}&marker=22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "rel": "next" - } - ] -} diff --git a/api-ref/source/v1/samples/scheduled-operation-create-request.json b/api-ref/source/v1/samples/scheduled-operation-create-request.json deleted file mode 100644 index 7450ee25..00000000 --- a/api-ref/source/v1/samples/scheduled-operation-create-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{"scheduled_operation": { - "name": "My scheduled operation", - "description": "It will run everyday", - "operation_type": "protect", - "trigger_id": "23902b02-5666-4ee6-8dfe-962ac09c3995", - "operation_definition": { - "provider_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa399", - "plan_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398" - } - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/scheduled-operation-create-response.json b/api-ref/source/v1/samples/scheduled-operation-create-response.json deleted file mode 100644 index 563a954b..00000000 --- a/api-ref/source/v1/samples/scheduled-operation-create-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{"scheduled_operation": { - "id": "1a2c0c3d-f402-4cd8-b5db-82e85cb51fad", - "name": "My scheduled operation", - "description": "It will run everyday", - "operation_type": "protect", - "trigger_id": "23902b02-5666-4ee6-8dfe-962ac09c3995", - "operation_definition": { - "provider_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa399", - "plan_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398" - }, - "enabled": 1 - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/scheduled-operations-list-response.json b/api-ref/source/v1/samples/scheduled-operations-list-response.json deleted file mode 100644 index 604bf61d..00000000 --- a/api-ref/source/v1/samples/scheduled-operations-list-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{"operations": [ - {"scheduled_operation": { - "id": "1a2c0c3d-f402-4cd8-b5db-82e85cb51fad", - "name": "My scheduled operation", - "description": "It will run everyday", - "operation_type": "protect", - "trigger_id": "23902b02-5666-4ee6-8dfe-962ac09c3995", - "operation_definition": { - "provider_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa399", - "plan_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398" - }, - "enabled": 1 - } - }, - ], - "operations_links": [ - { - "href": "/v1/{project_id}/scheduled_operations?limit={limit_num}&marker=1a2c0c3d-f402-4cd8-b5db-82e85cb51fad", - "rel": "next" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/services-list-response.json b/api-ref/source/v1/samples/services-list-response.json deleted file mode 100644 index 1234e2ba..00000000 --- a/api-ref/source/v1/samples/services-list-response.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "services": [{ - "status": 
"enabled", - "binary": "karbor-operationengine", - "state": "down", - "updated_at": "2019-11-03T11:50:25.000000", - "host": "devstack", - "disabled_reason": null, - "id": 1 - }, { - "status": "enabled", - "binary": "karbor-operationengine", - "state": "up", - "updated_at": "2019-11-11T02:56:05.000000", - "host": "devstack", - "disabled_reason": null, - "id": 2 - }, { - "status": "enabled", - "binary": "karbor-protection", - "state": "up", - "updated_at": "2019-11-11T02:56:03.000000", - "host": "devstack", - "disabled_reason": null, - "id": 3 - }] -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/services-update-request.json b/api-ref/source/v1/samples/services-update-request.json deleted file mode 100644 index e5485bf9..00000000 --- a/api-ref/source/v1/samples/services-update-request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "status": "disabled", - "disabled_reason": "service down" -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/services-update-response.json b/api-ref/source/v1/samples/services-update-response.json deleted file mode 100644 index 641b69a5..00000000 --- a/api-ref/source/v1/samples/services-update-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "service": { - "status": "disabled", - "binary": "karbor-operationengine", - "state": "down", - "updated_at": "2019-11-03T11:50:25.000000", - "host": "devstack", - "disabled_reason": null, - "id": 1 - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/trigger-create-request.json b/api-ref/source/v1/samples/trigger-create-request.json deleted file mode 100644 index 3446ec9e..00000000 --- a/api-ref/source/v1/samples/trigger-create-request.json +++ /dev/null @@ -1,12 +0,0 @@ -{"trigger_info": { - "name": "My backup trigger", - "type": "time", - "properties": { - "format": "calendar", - "pattern": "BEGIN:VEVENT\\nRRULE:FREQ=HOURLY;INTERVAL=1;\\nEND:VEVENT", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "3600" - } - } -} diff --git a/api-ref/source/v1/samples/trigger-create-response.json b/api-ref/source/v1/samples/trigger-create-response.json deleted file mode 100644 index eeb55590..00000000 --- a/api-ref/source/v1/samples/trigger-create-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{"trigger_info": { - "id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "name": "My backup trigger", - "type": "time", - "properties": { - "format": "calendar", - "pattern": "BEGIN:VEVENT\\nRRULE:FREQ=HOURLY;INTERVAL=1;\\nEND:VEVENT", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "3600" - } - } -} diff --git a/api-ref/source/v1/samples/trigger-update-request.json b/api-ref/source/v1/samples/trigger-update-request.json deleted file mode 100644 index 234c8350..00000000 --- a/api-ref/source/v1/samples/trigger-update-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{"trigger_info": { - "name": "Trigger for backup", - "properties": { - "format": "calendar", - "pattern": "BEGIN:VEVENT\\nRRULE:FREQ=HOURLY;INTERVAL=1;\\nEND:VEVENT", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "3600" - } - } -} diff --git a/api-ref/source/v1/samples/trigger-update-response.json b/api-ref/source/v1/samples/trigger-update-response.json deleted file mode 100644 index c4192cb5..00000000 --- a/api-ref/source/v1/samples/trigger-update-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{"trigger_info": { - "id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "name": "Trigger for backup", - "type": "time", - "properties": { 
- "format": "calendar", - "pattern": "BEGIN:VEVENT\\nRRULE:FREQ=HOURLY;INTERVAL=1;\\nEND:VEVENT", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "3600" - } - } -} diff --git a/api-ref/source/v1/samples/triggers-list-response.json b/api-ref/source/v1/samples/triggers-list-response.json deleted file mode 100644 index efda52f8..00000000 --- a/api-ref/source/v1/samples/triggers-list-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{"triggers": [ - {"trigger_info": { - "id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "type": "time", - "name": "My backup trigger", - "properties": { - "format": "calendar", - "pattern": "BEGIN:VEVENT\\nRRULE:FREQ=HOURLY;INTERVAL=1;\\nEND:VEVENT", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "3600" - } - } - } - ], - "triggers_links": [ - { - "href": "/v1/{project_id}/triggers?limit={limit_num}&marker=2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "rel": "next" - } - ] -} diff --git a/api-ref/source/v1/samples/verification-create-response.json b/api-ref/source/v1/samples/verification-create-response.json deleted file mode 100644 index d4d1010f..00000000 --- a/api-ref/source/v1/samples/verification-create-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "verification": { - "status": "in_progress", - "resources_status": {}, - "provider_id": "6659007d-6f66-4a0f-9cb4-17d6aded0bb9", - "parameters": {}, - "checkpoint_id": "a7418b97-0474-4360-9600-5a08fd60a2b6", - "project_id": "5d3242420cb641ac840cd4ae37af0f18", - "id": "8c1df1c5-29ab-4f77-8b45-bebc3aacf996", - "resources_reason": {} - } -} \ No newline at end of file diff --git a/api-ref/source/v1/samples/verifications-list-response.json b/api-ref/source/v1/samples/verifications-list-response.json deleted file mode 100644 index 3641a9c9..00000000 --- a/api-ref/source/v1/samples/verifications-list-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{"verifications": [ - { - "status": "fail", - "resources_status": {}, - "provider_id": "6659007d-6f66-4a0f-9cb4-17d6aded0bb9", - "parameters": {}, - "checkpoint_id": "35486bac-1b14-4158-a403-916cf09a5596", - "project_id": "5d3242420cb641ac840cd4ae37af0f18", - "id": "7f7bd4a4-0a96-4f03-9553-d6c6cf7b36b8", - "resources_reason": {} - }, - { - "status": "success", - "resources_status": { - "OS::Glance::Image#1a220464-6449-4fb6-8169-14f3016c4bb9": "available" - }, - "provider_id": "6659007d-6f66-4a0f-9cb4-17d6aded0bb9", - "parameters": {}, - "checkpoint_id": "dd8bbf5a-6759-4ce2-a64b-854db2fa7541", - "project_id": "5d3242420cb641ac840cd4ae37af0f18", - "id": "2c126c95-3e69-46fb-83f0-b63e9770906f", - "resources_reason": { - "OS::Glance::Image#1a220464-6449-4fb6-8169-14f3016c4bb9": ""} - }] -} \ No newline at end of file diff --git a/api-ref/source/v1/status.yaml b/api-ref/source/v1/status.yaml deleted file mode 100644 index 250ac734..00000000 --- a/api-ref/source/v1/status.yaml +++ /dev/null @@ -1,62 +0,0 @@ -################# -# Success Codes # -################# -200: - default: | - Request was successful. -201: - default: | - Resource was created and is ready to use. -202: - default: | - Request was accepted for processing, but the processing has not been - completed. A 'location' header is included in the response which contains - a link to check the progress of the request. -204: - default: | - The server has fulfilled the request by deleting the resource. -300: - default: | - There are multiple choices for resources. The request has to be more - specific to successfully retrieve one of these resources. 
-302: - default: | - The response is a redirection hint. The header of the response - usually contains a 'location' value that requesters can check to track - the real location of the resource. - -################# -# Error Codes # -################# - -400: - default: | - Some content in the request was invalid. - resource_signal: | - The target resource doesn't support receiving a signal. -401: - default: | - User must authenticate before making a request. -403: - default: | - Policy does not allow the current user to do this operation. -404: - default: | - The requested resource could not be found. -405: - default: | - Method is not valid for this endpoint. -409: - default: | - This operation conflicted with another operation on this resource. - duplicate_zone: | - There is already a zone with this name. -500: - default: | - Something went wrong inside the service. This usually should not happen. - If it does happen, it means the server has experienced some serious - problems. -503: - default: | - Service is not available. This is mostly caused by service configuration - errors that prevent the service from starting up successfully. diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb7..00000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/devstack/devstackgaterc b/devstack/devstackgaterc deleted file mode 100644 index 8b763590..00000000 --- a/devstack/devstackgaterc +++ /dev/null @@ -1,62 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# This script is executed in the OpenStack CI job that runs DevStack + tempest. -# It is also used by the rally job. 
You can find the CI job configuration here: -# -# http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/karbor.yaml -# - -s=rabbit,mysql,key -s+=,n-cpu,n-api,n-cond,n-sch,n-cauth,placement-api,n-api-meta -# n-obj has been removed from mitaka -if [[ "stable/kilo stable/liberty" =~ $ZUUL_BRANCH ]]; then - s+=,n-obj -fi - -if [[ "stable/kilo stable/liberty stable/mitaka stable/ocata" =~ $ZUUL_BRANCH ]]; then - s+=,n-crt -fi - -s+=,neutron,q-svc,q-agt,q-dhcp,q-meta -s+=,cinder,g-api,g-reg -s+=,c-api,c-vol,c-sch,c-bak -s+=,s-proxy,s-object,s-container,s-account -s+=,h-eng,h-api,h-api-cfn,h-api-cw -s+=,manila,m-api,m-sch,m-shr,m-dat -s+=,karbor,karbor-api,karbor-operationengine,karbor-protection -ENABLED_SERVICES=$s -export ENABLED_SERVICES - -DEFAULT_EXTRA_SPECS="'snapshot_support=True create_share_from_snapshot_support=True mount_snapshot_support=True driver_handles_share_servers=False'" - -DEVSTACK_LOCAL_CONFIG+="API_WORKERS=4" -DEVSTACK_LOCAL_CONFIG+=$'\n'"VOLUME_BACKING_FILE_SIZE=20490M" -DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin karbor https://git.openstack.org/openstack/karbor" -DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin manila https://git.openstack.org/openstack/manila" -DEVSTACK_LOCAL_CONFIG+=$'\n'"MANILA_OPTGROUP_generic1_driver_handles_share_servers=False" -DEVSTACK_LOCAL_CONFIG+=$'\n'"MANILA_USE_SERVICE_INSTANCE_PASSWORD=True" -DEVSTACK_LOCAL_CONFIG+=$'\n'"MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS=$DEFAULT_EXTRA_SPECS" -DEVSTACK_LOCAL_CONFIG+=$'\n'"SHARE_DRIVER=manila.share.drivers.lvm.LVMShareDriver" - -DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5" -DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_REPLICAS=1" -DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_DATA_DIR=$DEST/data" -DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_LOOPBACK_DISK_SIZE=10G" -DEVSTACK_LOCAL_CONFIG+=$'\n'"DATABASE_PASSWORD=password" -DEVSTACK_LOCAL_CONFIG+=$'\n'"RABBIT_PASSWORD=password" -DEVSTACK_LOCAL_CONFIG+=$'\n'"SERVICE_PASSWORD=password" -DEVSTACK_LOCAL_CONFIG+=$'\n'"SERVICE_TOKEN=password" -DEVSTACK_LOCAL_CONFIG+=$'\n'"ADMIN_PASSWORD=password" - -export DEVSTACK_LOCAL_CONFIG diff --git a/devstack/files/apache-karbor-api.template b/devstack/files/apache-karbor-api.template deleted file mode 100644 index 42f898c9..00000000 --- a/devstack/files/apache-karbor-api.template +++ /dev/null @@ -1,26 +0,0 @@ -Listen %PUBLICPORT% - - - WSGIDaemonProcess osapi_karbor processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} - WSGIProcessGroup osapi_karbor - WSGIScriptAlias / %KARBOR_BIN_DIR%/karbor-wsgi - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/%APACHE_NAME%/karbor-api.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - \ No newline at end of file diff --git a/devstack/local.conf.sample b/devstack/local.conf.sample deleted file mode 100644 index 8899206a..00000000 --- a/devstack/local.conf.sample +++ /dev/null @@ -1,44 +0,0 @@ -[[local|localrc]] - -DATABASE_PASSWORD=password -RABBIT_PASSWORD=password -SERVICE_PASSWORD=password -SERVICE_TOKEN=password -ADMIN_PASSWORD=password - -enable_plugin karbor https://git.openstack.org/openstack/karbor master -enable_plugin karbor-dashboard https://git.openstack.org/openstack/karbor-dashboard master - -#run the services you want to use -ENABLED_SERVICES=rabbit,mysql,key -ENABLED_SERVICES+=,n-cpu,n-api,n-obj,n-cond,n-sch,n-novnc,n-cauth,n-api-meta -ENABLED_SERVICES+=,placement-api 
-ENABLED_SERVICES+=,neutron,q-svc,q-agt,q-dhcp,q-meta
-ENABLED_SERVICES+=,cinder,g-api,g-reg
-ENABLED_SERVICES+=,c-api,c-vol,c-sch,c-bak,horizon
-ENABLED_SERVICES+=,manila,m-api,m-sch,m-shr,m-dat
-
-#Add the karbor services
-enable_service karbor-api
-enable_service karbor-operationengine
-enable_service karbor-protection
-
-#Add the karbor-dashboard services
-enable_service karbor-dashboard
-
-#disable the default services you don't want to use
-disable_service n-net
-
-SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
-SWIFT_REPLICAS=1
-SWIFT_DATA_DIR=$DEST/data
-enable_service s-proxy s-object s-container s-account
-
-# Enable Manila
-
-enable_plugin manila https://git.openstack.org/openstack/manila master
-
-MANILA_OPTGROUP_generic1_driver_handles_share_servers=False
-MANILA_USE_SERVICE_INSTANCE_PASSWORD=True
-MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS="snapshot_support=True create_share_from_snapshot_support=True mount_snapshot_support=True driver_handles_share_servers=False"
-SHARE_DRIVER=manila.share.drivers.lvm.LVMShareDriver
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
deleted file mode 100644
index 980a6a64..00000000
--- a/devstack/plugin.sh
+++ /dev/null
@@ -1,272 +0,0 @@
-# Devstack extras script to install Karbor
-
-# Test if any karbor services are enabled
-# is_karbor_enabled
-function is_karbor_enabled {
-    echo "Checking if Karbor is Enabled"
-    # Set the flag according to whether any karbor-* service is enabled.
-    if [[ ,${ENABLED_SERVICES} =~ ,"karbor-" ]]; then
-        Q_ENABLE_KARBOR="True"
-    else
-        Q_ENABLE_KARBOR="False"
-    fi
-}
-
-function _create_karbor_conf_dir {
-
-    # Put config files in ``KARBOR_CONF_DIR`` for everyone to find
-
-    sudo install -d -o $STACK_USER $KARBOR_CONF_DIR
-
-}
-
-# create_karbor_accounts() - Set up common required karbor accounts
-# Tenant    User    Roles
-# ------------------------------------------------------------------
-# service   karbor  service
-function create_karbor_accounts {
-    if is_service_enabled karbor-api karbor-protection karbor-operationengine; then
-        create_service_user "$KARBOR_TRUSTEE_USER" "admin"
-    fi
-
-    if is_service_enabled karbor-api; then
-        get_or_create_service "$KARBOR_SERVICE_NAME" "$KARBOR_SERVICE_TYPE" "Application Data Protection Service"
-
-        get_or_create_endpoint "$KARBOR_SERVICE_TYPE" "$REGION_NAME" \
-            "$KARBOR_API_ENDPOINT" \
-            "$KARBOR_API_ENDPOINT" \
-            "$KARBOR_API_ENDPOINT"
-    fi
-}
-
-
-# karbor_config_apache_wsgi() - Set WSGI config files
-function karbor_config_apache_wsgi {
-    local karbor_apache_conf
-    karbor_apache_conf=$(apache_site_config_for osapi_karbor)
-    local karbor_ssl=""
-    local karbor_certfile=""
-    local karbor_keyfile=""
-    local karbor_api_port=$KARBOR_API_PORT
-
-    if is_ssl_enabled_service karbor-api; then
-        karbor_ssl="SSLEngine On"
-        karbor_certfile="SSLCertificateFile $KARBOR_SSL_CERT"
-        karbor_keyfile="SSLCertificateKeyFile $KARBOR_SSL_KEY"
-    fi
-
-    # Copy the proxy vhost file and substitute the template placeholders.
-    sudo cp $KARBOR_API_APACHE_TEMPLATE $karbor_apache_conf
-    sudo sed -e "
-        s|%PUBLICPORT%|$karbor_api_port|g;
-        s|%APACHE_NAME%|$APACHE_NAME|g;
-        s|%APIWORKERS%|$API_WORKERS|g;
-        s|%KARBOR_BIN_DIR%|$KARBOR_BIN_DIR|g;
-        s|%SSLENGINE%|$karbor_ssl|g;
-        s|%SSLCERTFILE%|$karbor_certfile|g;
-        s|%SSLKEYFILE%|$karbor_keyfile|g;
-        s|%USER%|$STACK_USER|g;
-    " -i $karbor_apache_conf
-}
-
-function karbor_config_uwsgi {
-    write_uwsgi_config "$KARBOR_API_UWSGI_CONF" "$KARBOR_API_UWSGI" "/$KARBOR_SERVICE_TYPE"
-}
-
-# clean_karbor_api_mod_wsgi() - Remove wsgi files, disable and remove the apache vhost file
-function clean_karbor_api_mod_wsgi {
-    sudo rm -f $(apache_site_config_for osapi_karbor)
-}
-
-function clean_karbor_api_uwsgi {
remove_uwsgi_config "$KARBOR_API_UWSGI_CONF" "$KARBOR_API_UWSGI" -} - -# start_karbor_api_mod_wsgi() - Start the API processes ahead of other things -function start_karbor_api_mod_wsgi { - enable_apache_site osapi_karbor - restart_apache_server - tail_log karbor-api /var/log/$APACHE_NAME/karbor-api.log - - echo "Waiting for Karbor API to start..." - if ! wait_for_service $SERVICE_TIMEOUT $KARBOR_API_ENDPOINT; then - die $LINENO "karbor-api mod_wsgi did not start" - fi -} - -function start_karbor_api_uwsgi { - run_process karbor-api "$(which uwsgi) --ini $KARBOR_API_UWSGI_CONF" "" - - echo "Waiting for Karbor API to start..." - if ! wait_for_service $SERVICE_TIMEOUT $KARBOR_API_ENDPOINT; then - die $LINENO "karbor-api uwsgi did not start" - fi -} - -# stop_karbor_api_mod_wsgi() - Disable the api service and stop it. -function stop_karbor_api_mod_wsgi { - disable_apache_site osapi_karbor - restart_apache_server -} - -function stop_karbor_api_uwsgi { - remove_uwsgi_config "$KARBOR_API_UWSGI_CONF" "$KARBOR_API_UWSGI" - stop_process karbor-api -} - -function configure_karbor { - if is_service_enabled karbor-api karbor-operationengine karbor-protection ; then - echo "Configuring Karbor API" - - # generate configuration file - cd $KARBOR_DIR - oslo-config-generator --config-file etc/oslo-config-generator/karbor.conf --output-file etc/karbor.conf.sample - cp etc/karbor.conf.sample etc/karbor.conf - - cp $KARBOR_DIR/etc/karbor.conf $KARBOR_CONF - cp $KARBOR_DIR/etc/api-paste.ini $KARBOR_CONF_DIR - - if [[ -f $KARBOR_DIR/etc/policy.json ]]; then - cp $KARBOR_DIR/etc/policy.json $KARBOR_CONF_DIR - fi - - cp -R $KARBOR_DIR/etc/providers.d $KARBOR_CONF_DIR - cp $KARBOR_DIR/devstack/providers.d/* $KARBOR_CONF_DIR/providers.d - - iniset $KARBOR_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $KARBOR_CONF DEFAULT use_syslog $SYSLOG - iniset $KARBOR_CONF DEFAULT min_interval $KARBOR_SCHEDULED_MIN_INTERVAL - iniset $KARBOR_CONF DEFAULT min_window_time $KARBOR_SCHEDULED_MIN_WINDOW_TIME - iniset $KARBOR_CONF DEFAULT max_window_time $KARBOR_SCHEDULED_MAX_WINDOW_TIME - echo "Configuring Karbor API Database" - iniset $KARBOR_CONF database connection `database_connection_url karbor` - iniset_rpc_backend karbor $KARBOR_CONF - - # Configure for trustee - iniset $KARBOR_CONF trustee auth_type password - iniset $KARBOR_CONF trustee auth_url $KEYSTONE_AUTH_URI - iniset $KARBOR_CONF trustee username $KARBOR_TRUSTEE_USER - iniset $KARBOR_CONF trustee password $SERVICE_PASSWORD - iniset $KARBOR_CONF trustee user_domain_id default - - setup_colorized_logging $KARBOR_CONF DEFAULT - echo "Configuring Karbor API colorized" - if is_service_enabled keystone; then - - echo "Configuring Karbor keystone Auth" - create_karbor_cache_dir - - # Configure auth token middleware - configure_auth_token_middleware $KARBOR_CONF karbor \ - $KARBOR_AUTH_CACHE_DIR - - - # Configure for clients_keystone - iniset $KARBOR_CONF clients_keystone auth_uri $KEYSTONE_AUTH_URI - - # Config karbor client - iniset $KARBOR_CONF karbor_client service_name $KARBOR_SERVICE_NAME - iniset $KARBOR_CONF karbor_client service_type $KARBOR_SERVICE_TYPE - iniset $KARBOR_CONF karbor_client version 1 - - else - iniset $KARBOR_CONF DEFAULT auth_strategy noauth - fi - fi -} - -function configure_providers { - if is_swift_enabled; then - echo_summary "Configuring Swift Bank" - iniset $KARBOR_CONF_DIR/providers.d/openstack-infra.conf swift_client swift_key $SERVICE_PASSWORD - fi -} - -function create_karbor_cache_dir { - - # Delete existing dir - sudo rm -rf 
$KARBOR_AUTH_CACHE_DIR
-    sudo mkdir -p $KARBOR_AUTH_CACHE_DIR
-    sudo chown `whoami` $KARBOR_AUTH_CACHE_DIR
-
-}
-
-function install_karborclient {
-    if use_library_from_git "python-karborclient"; then
-        echo_summary "Installing Karbor Client from git"
-        git_clone $KARBORCLIENT_REPO $KARBORCLIENT_DIR $KARBORCLIENT_BRANCH
-        setup_develop $KARBORCLIENT_DIR
-    fi
-}
-
-is_karbor_enabled
-
-if [[ "$Q_ENABLE_KARBOR" == "True" ]]; then
-    if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
-        echo_summary "Karbor pre-install"
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        install_karborclient
-
-        echo_summary "Installing Karbor"
-
-        setup_package $KARBOR_DIR -e
-        _create_karbor_conf_dir
-
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring Karbor"
-
-        configure_karbor
-        configure_providers
-
-        if [[ "$KARBOR_DEPLOY" == "mod_wsgi" ]]; then
-            karbor_config_apache_wsgi
-        elif [[ "$KARBOR_DEPLOY" == "uwsgi" ]]; then
-            karbor_config_uwsgi
-        fi
-
-        echo export PYTHONPATH=\$PYTHONPATH:$KARBOR_DIR >> $RC_DIR/.localrc.auto
-
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-
-        echo_summary "Creating Karbor entities for auth service"
-        create_karbor_accounts
-
-        echo_summary "Initializing Karbor Service"
-
-        if is_service_enabled $DATABASE_BACKENDS; then
-            # (re)create karbor database
-            recreate_database karbor utf8
-
-            # Migrate karbor database
-            $KARBOR_BIN_DIR/karbor-manage db sync
-        fi
-        if is_service_enabled karbor-api; then
-            if [[ "$KARBOR_DEPLOY" == "mod_wsgi" ]]; then
-                start_karbor_api_mod_wsgi
-            elif [[ "$KARBOR_DEPLOY" == "uwsgi" ]]; then
-                start_karbor_api_uwsgi
-            fi
-        fi
-        if is_service_enabled karbor-operationengine; then
-            run_process karbor-operationengine "$KARBOR_BIN_DIR/karbor-operationengine --config-file $KARBOR_CONF"
-        fi
-        if is_service_enabled karbor-protection; then
-            run_process karbor-protection "$KARBOR_BIN_DIR/karbor-protection --config-file $KARBOR_CONF"
-        fi
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-
-        if is_service_enabled karbor-api; then
-            if [[ "$KARBOR_DEPLOY" == "mod_wsgi" ]]; then
-                stop_karbor_api_mod_wsgi
-                clean_karbor_api_mod_wsgi
-            elif [[ "$KARBOR_DEPLOY" == "uwsgi" ]]; then
-                stop_karbor_api_uwsgi
-                clean_karbor_api_uwsgi
-            fi
-        fi
-        if is_service_enabled karbor-operationengine; then
-            stop_process karbor-operationengine
-        fi
-        if is_service_enabled karbor-protection; then
-            stop_process karbor-protection
-        fi
-    fi
-fi
diff --git a/devstack/providers.d/eisoo.conf b/devstack/providers.d/eisoo.conf
deleted file mode 100644
index cf41929b..00000000
--- a/devstack/providers.d/eisoo.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-[provider]
-name=EISOO Provider
-description=This provider provides data protection for applications with EISOO AnyBackup
-id=e4008868-be97-492c-be41-44e50ef2e16f
-
-bank=karbor-swift-bank-plugin
-
-enabled=False
-
-[swift_client]
-swift_auth_url=http://127.0.0.1/identity
-swift_user=demo
-swift_key=password
-swift_tenant_name=demo
-
-[swift_bank_plugin]
-lease_expire_window=120
-lease_renew_window=100
-lease_validity_window=100
-
-[eisoo_client]
-eisoo_endpoint=https://172.17.238.11:9801
-eisoo_app_id=MTQ3NzAyMDg0MC41OQ==
-eisoo_app_secret=OEVBM0IyQkQ3OEZGMDIxNTFGRUVDRjMwOTIzM0IyQ0M=
diff --git a/devstack/providers.d/openstack-fs-bank.conf b/devstack/providers.d/openstack-fs-bank.conf
deleted file mode 100644
index 7bd375cd..00000000
--- a/devstack/providers.d/openstack-fs-bank.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-[provider]
-name = OS Infra Provider with local file system bank
-description = This
provider uses local file system as the bank of karbor -id = 6659007d-6f66-4a0f-9cb4-17d6aded0bb9 - -plugin=karbor-volume-protection-plugin -plugin=karbor-image-protection-plugin -plugin=karbor-server-protection-plugin -bank=karbor-fs-bank-plugin - -enabled=True - -[file_system_bank_plugin] -file_system_bank_path=/opt/stack/karbor_fs_bank diff --git a/devstack/providers.d/openstack-infra-volume-snapshot.conf b/devstack/providers.d/openstack-infra-volume-snapshot.conf deleted file mode 100644 index d942c288..00000000 --- a/devstack/providers.d/openstack-infra-volume-snapshot.conf +++ /dev/null @@ -1,20 +0,0 @@ -[provider] -name = OS Infra Provider with volume snapshot plugin -description = This provider uses OpenStack's own services (swift, cinder) as storage -id = 90d5bfea-a259-41e6-80c6-dcfcfcd9d827 - -plugin=karbor-volume-snapshot-plugin -bank=karbor-swift-bank-plugin - -enabled=True - -[swift_client] -swift_auth_url=http://127.0.0.1/identity -swift_user=demo -swift_key=password -swift_tenant_name=demo - -[swift_bank_plugin] -lease_expire_window=120 -lease_renew_window=100 -lease_validity_window=100 diff --git a/devstack/providers.d/openstack-s3-bank.conf b/devstack/providers.d/openstack-s3-bank.conf deleted file mode 100644 index 806b8ef8..00000000 --- a/devstack/providers.d/openstack-s3-bank.conf +++ /dev/null @@ -1,23 +0,0 @@ -[provider] -name = OS Infra Provider with S3 compatible storage bank -description = This provider uses S3 compatible storage as the bank of karbor -id = c8e52e4d-0479-43e0-b1a1-318c86798cb8 - -plugin=karbor-volume-protection-plugin -plugin=karbor-image-protection-plugin -plugin=karbor-server-protection-plugin -bank=karbor-s3-bank-plugin - -enabled=False - -[s3_client] -s3_endpoint=http://127.0.0.1:7480 -s3_access_key=demo -s3_secret_key=password - -[s3_bank_plugin] -lease_expire_window=600 -lease_renew_window=120 -lease_validity_window=100 -bank_s3_object_bucket=karbor -bank_s3_lease_bucket=lease \ No newline at end of file diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index 0c0ed82f..00000000 --- a/devstack/settings +++ /dev/null @@ -1,42 +0,0 @@ -# Git information -KARBOR_REPO=${KARBOR_REPO:-https://opendev.org/openstack/karbor/} -KARBOR_DIR=$DEST/karbor -KARBOR_BRANCH=${KARBOR_BRANCH:-master} -KARBORCLIENT_REPO=${KARBORCLIENT_REPO:-"https://opendev.org/openstack/python-karborclient/"} -KARBORCLIENT_DIR=$DEST/python-karborclient -KARBORCLIENT_BRANCH=${KARBORCLIENT_BRANCH:-master} - -# common variables -KARBOR_SERVICE_NAME=karbor -KARBOR_SERVICE_TYPE=data-protect -KARBOR_CONF_DIR=${KARBOR_CONF_DIR:-/etc/karbor} -KARBOR_BIN_DIR=$(get_python_exec_prefix) -KARBOR_CONF=$KARBOR_CONF_DIR/karbor.conf -KARBOR_TRUSTEE_USER=karbor -KARBOR_SCHEDULED_MIN_INTERVAL=60 -KARBOR_SCHEDULED_MIN_WINDOW_TIME=20 -KARBOR_SCHEDULED_MAX_WINDOW_TIME=30 - -KARBOR_API_UWSGI_CONF=$KARBOR_CONF_DIR/karbor-uwsgi.ini -KARBOR_API_UWSGI=$KARBOR_BIN_DIR/karbor-wsgi - -# karbor rest api -KARBOR_API=$KARBOR_DIR/karbor/cmd/api.py -KARBOR_API_APACHE_TEMPLATE=$KARBOR_DIR/devstack/files/apache-karbor-api.template - -KARBOR_API_LISTEN_ADDRESS=${KARBOR_API_LISTEN_ADDRESS:-0.0.0.0} -KARBOR_API_HOST=${KARBOR_API_HOST:-$SERVICE_HOST} -KARBOR_API_PORT=${KARBOR_API_PORT:-8799} -KARBOR_API_PROTOCOL=${KARBOR_API_PROTOCOL:-$SERVICE_PROTOCOL} -if [[ "$WSGI_MODE" == "uwsgi" ]]; then - KARBOR_DEPLOY=uwsgi - KARBOR_API_ENDPOINT="$KARBOR_API_PROTOCOL://$KARBOR_API_HOST/$KARBOR_SERVICE_TYPE/v1/\$(project_id)s" -else - KARBOR_DEPLOY=mod_wsgi - 
KARBOR_API_ENDPOINT="$KARBOR_API_PROTOCOL://$KARBOR_API_HOST:$KARBOR_API_PORT/v1/\$(project_id)s" -fi - - -KARBOR_AUTH_CACHE_DIR=${KARBOR_AUTH_CACHE_DIR:-/var/cache/karbor} - -export PYTHONPATH=$PYTHONPATH:$KARBOR_DIR diff --git a/doc/images/3-tier-app.png b/doc/images/3-tier-app.png deleted file mode 100644 index a2e654da..00000000 Binary files a/doc/images/3-tier-app.png and /dev/null differ diff --git a/doc/images/Karbor.png b/doc/images/Karbor.png deleted file mode 100644 index 4791bf36..00000000 Binary files a/doc/images/Karbor.png and /dev/null differ diff --git a/doc/images/KarborInPieces.png b/doc/images/KarborInPieces.png deleted file mode 100644 index c9701f14..00000000 Binary files a/doc/images/KarborInPieces.png and /dev/null differ diff --git a/doc/images/api-service-class-diagram.png b/doc/images/api-service-class-diagram.png deleted file mode 100644 index 310f571b..00000000 Binary files a/doc/images/api-service-class-diagram.png and /dev/null differ diff --git a/doc/images/available_protectables.svg b/doc/images/available_protectables.svg deleted file mode 100644 index aa34d47b..00000000 --- a/doc/images/available_protectables.svg +++ /dev/null @@ -1,57 +0,0 @@ - - - - - - - - - - - - Karbor Protectables - - This file contains the dependecy between protectables in the default - distribution of Karbor. The arrows, similar to inheritance point to the parent - since Protectables define what types they depend on so this reflects who is - responsible for the connection. - - OS::Glance::Image - - - - OS::Nova::Server - - - - OS::Keystone::Project - - - - OS::Cinder::Volume - - - - OS::Neutron::Topology - - - - OS::Manila::Share - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/doc/images/class_diagram.svg b/doc/images/class_diagram.svg deleted file mode 100644 index 4b426d0a..00000000 --- a/doc/images/class_diagram.svg +++ /dev/null @@ -1,208 +0,0 @@ - - - - - - - - - - - - "Karbor API model" - - - - Protectable - - name: string - instances: []Resource - is_root: bool - - - - - Resource - - id: UUID - type: ResourceType - schema: JSONSchema - dependent_resources: []Resource - - - - - Trigger - - - - - - TimedTrigger - - - - - - EventTrigger - - - - - - Checkpoint - - id: UUID - tenant_id: UUID - plan: ProtectionPlan - status: string - started_at: DateTime - - - - - ProtectionPlan - - id: UUID - is_enabled: boolean - name: string - status: ePlanStatus - resources: []Resource - protection_provider: ProtectionProvider - parameters: dict - - - - - AutomaticOperation - - id: UUID - name: string - description: string - tenant_id: UUID - - - - - «abstract» - ScheduledOperation - - trigger: Trigger - - - - - BackupPlan - - protection_plan: ProtectionPlan - - - - - DeleteCheckpoints - - query: string - protection_provider: ProtectionProvider - - - - - ProtectionProvider - - name: string - description: string - extended_info_schema: [ResourceType]JSONSchema - options_schema: [ResourceType]JSONSchema - restore_options: [ResourceType]JSONSchema - checkpoints: []Checkpoint - - - - - ePlanStatus - - started - suspended - - - - - RestoreTarget - - keystone_uri: URI - - - - - Restore - - id: UUID - project_id: UUID - target: RestoreTarget - provider: ProtectionProvider - checkpoint: Checkpoint - started_at: string - - - - lists - - - - - - - - - stores a copy of - - - - - when should the operation should trigger - - - - - - - - - - - - - lists - - - - - aggregates - 1 - N - - - - - - - - - restores to - - \ No newline at end of file diff --git 
a/doc/images/high_level_architecture.png b/doc/images/high_level_architecture.png deleted file mode 100644 index 0e5d059b..00000000 Binary files a/doc/images/high_level_architecture.png and /dev/null differ diff --git a/doc/images/karbor-api.png b/doc/images/karbor-api.png deleted file mode 100644 index 31702c2a..00000000 Binary files a/doc/images/karbor-api.png and /dev/null differ diff --git a/doc/images/mascot/OpenStack_Project_Karbor_Horizontal.png b/doc/images/mascot/OpenStack_Project_Karbor_Horizontal.png deleted file mode 100644 index 46c2c499..00000000 Binary files a/doc/images/mascot/OpenStack_Project_Karbor_Horizontal.png and /dev/null differ diff --git a/doc/images/mascot/OpenStack_Project_Karbor_horizontal.eps b/doc/images/mascot/OpenStack_Project_Karbor_horizontal.eps deleted file mode 100644 index 1fe95fe5..00000000 Binary files a/doc/images/mascot/OpenStack_Project_Karbor_horizontal.eps and /dev/null differ diff --git a/doc/images/mascot/OpenStack_Project_Karbor_horizontal_1color.eps b/doc/images/mascot/OpenStack_Project_Karbor_horizontal_1color.eps deleted file mode 100644 index 7845e3c4..00000000 Binary files a/doc/images/mascot/OpenStack_Project_Karbor_horizontal_1color.eps and /dev/null differ diff --git a/doc/images/mascot/OpenStack_Project_Karbor_mascot.png b/doc/images/mascot/OpenStack_Project_Karbor_mascot.png deleted file mode 100644 index db02db9b..00000000 Binary files a/doc/images/mascot/OpenStack_Project_Karbor_mascot.png and /dev/null differ diff --git a/doc/images/mascot/OpenStack_Project_Karbor_mascot_small.png b/doc/images/mascot/OpenStack_Project_Karbor_mascot_small.png deleted file mode 100644 index 3c08167e..00000000 Binary files a/doc/images/mascot/OpenStack_Project_Karbor_mascot_small.png and /dev/null differ diff --git a/doc/images/mascot/OpenStack_Project_Karbor_vertical.eps b/doc/images/mascot/OpenStack_Project_Karbor_vertical.eps deleted file mode 100644 index cf0f374b..00000000 Binary files a/doc/images/mascot/OpenStack_Project_Karbor_vertical.eps and /dev/null differ diff --git a/doc/images/mascot/OpenStack_Project_Karbor_vertical.png b/doc/images/mascot/OpenStack_Project_Karbor_vertical.png deleted file mode 100644 index aff0286f..00000000 Binary files a/doc/images/mascot/OpenStack_Project_Karbor_vertical.png and /dev/null differ diff --git a/doc/images/mascot/OpenStack_Project_Karbor_vertical_1color.eps b/doc/images/mascot/OpenStack_Project_Karbor_vertical_1color.eps deleted file mode 100644 index 4a3b299d..00000000 Binary files a/doc/images/mascot/OpenStack_Project_Karbor_vertical_1color.eps and /dev/null differ diff --git a/doc/images/operation-engine/create_scheduled_operation_seq_diagram.png b/doc/images/operation-engine/create_scheduled_operation_seq_diagram.png deleted file mode 100644 index 8f83741c..00000000 Binary files a/doc/images/operation-engine/create_scheduled_operation_seq_diagram.png and /dev/null differ diff --git a/doc/images/operation-engine/delete_scheduled_operation_seq_diagram.png b/doc/images/operation-engine/delete_scheduled_operation_seq_diagram.png deleted file mode 100644 index 84eefab6..00000000 Binary files a/doc/images/operation-engine/delete_scheduled_operation_seq_diagram.png and /dev/null differ diff --git a/doc/images/operation-engine/operation_engine_architecture_diagram.png b/doc/images/operation-engine/operation_engine_architecture_diagram.png deleted file mode 100644 index 18c47ee6..00000000 Binary files a/doc/images/operation-engine/operation_engine_architecture_diagram.png and /dev/null differ diff --git 
a/doc/images/operation-engine/operation_engine_class_diagram.png b/doc/images/operation-engine/operation_engine_class_diagram.png deleted file mode 100644 index ef9553d8..00000000 Binary files a/doc/images/operation-engine/operation_engine_class_diagram.png and /dev/null differ diff --git a/doc/images/operation-engine/operation_state_diagram.png b/doc/images/operation-engine/operation_state_diagram.png deleted file mode 100644 index 2320dd7a..00000000 Binary files a/doc/images/operation-engine/operation_state_diagram.png and /dev/null differ diff --git a/doc/images/pluggable_protection_provider.svg b/doc/images/pluggable_protection_provider.svg deleted file mode 100644 index 581e3e61..00000000 --- a/doc/images/pluggable_protection_provider.svg +++ /dev/null @@ -1 +0,0 @@ -Pluggable Protection ProviderResourceTypeStringResourcetype: ResourceTypeid: UUIDResourceGraphNoderesource: Resourcedependent_resources: []ResourceGraphNodeResourceGraphWalkerconstructor(sources: []ResourceGraphNode)add_listener(listener: ResourceGraphWalkerListener)walk()ResourceGraphWalkerListeneron_node_enter(node: ResourceGraphNode, is_first_visit: boolean)on_node_exit(node: ResourceGraphNode, is_first_visit: boolean)ProtectableRegistryfetch_dependant_resources(resource: Resource): []Resourceregister(resource_type: ResourceType, protectable: Protectable)list_resources(resource_type: ResourceType): [] ResourceProtectablepossible_parent_types(resource_type: ResourceType): []ResourceTypefetch_child_resources(resource: Resource): []Resourcelist_resources(resource_type: ResourceType): []ResourceOperationprotectstartsuspendrestoredeleteContextplan: ProtectionPlanoperation: Operationparameters: dictresource: ResourceGraphNodebank_section: BankSectionis_first_visit: booleantask_builder: TaskBuilderBankPluginBankSectionis_writeable(): boolBankPluginInterfaceTaskBuilderadd_task(target: function, args=collection): Tasklink_tasks(a: Task, b: Task)TaskOpaque objectProtectionPluginget_supported_resources_types(): []ResourceTypemetadata functionson_resource_start(context: Context)on_resource_end(context: Context)graph walk functionsget_options_schema(resource_type: ResourceType)get_saved_info_schema(resource_type: ResourceType)get_restore_schema(resource_type: ResourceType)get_saved_info(metadata_store: MetadataStore, resource: Resource)schema functionsProtectionProviderPluggableProtectionProviderplugins: [ResourceType]ProtectionPlugin10..*<<creates>>1*CreatesUses for functionality1..*uses it to iterate over graph \ No newline at end of file diff --git a/doc/images/protection-service/class-diagram.png b/doc/images/protection-service/class-diagram.png deleted file mode 100644 index 24864195..00000000 Binary files a/doc/images/protection-service/class-diagram.png and /dev/null differ diff --git a/doc/images/protection-service/class_diagram.png b/doc/images/protection-service/class_diagram.png deleted file mode 100644 index deaf15f1..00000000 Binary files a/doc/images/protection-service/class_diagram.png and /dev/null differ diff --git a/doc/images/protection-service/hooks.png b/doc/images/protection-service/hooks.png deleted file mode 100644 index afc589c7..00000000 Binary files a/doc/images/protection-service/hooks.png and /dev/null differ diff --git a/doc/images/protection-service/protect-rpc-call-seq-diagram.png b/doc/images/protection-service/protect-rpc-call-seq-diagram.png deleted file mode 100644 index 29b6bb6f..00000000 Binary files a/doc/images/protection-service/protect-rpc-call-seq-diagram.png and /dev/null differ diff 
--git a/doc/images/protection-service/protection-architecture.png b/doc/images/protection-service/protection-architecture.png
deleted file mode 100644
index 139ca0c4..00000000
Binary files a/doc/images/protection-service/protection-architecture.png and /dev/null differ
diff --git a/doc/images/protection-service/restore-processing-sequence-flow.png b/doc/images/protection-service/restore-processing-sequence-flow.png
deleted file mode 100644
index d0847ca9..00000000
Binary files a/doc/images/protection-service/restore-processing-sequence-flow.png and /dev/null differ
diff --git a/doc/images/protection_provider.png b/doc/images/protection_provider.png
deleted file mode 100644
index c20bedfb..00000000
Binary files a/doc/images/protection_provider.png and /dev/null differ
diff --git a/doc/images/resource_tree_architecture.png b/doc/images/resource_tree_architecture.png
deleted file mode 100644
index 27ff1379..00000000
Binary files a/doc/images/resource_tree_architecture.png and /dev/null differ
diff --git a/doc/source/admin/client.rst b/doc/source/admin/client.rst
deleted file mode 100644
index 42b6efda..00000000
--- a/doc/source/admin/client.rst
+++ /dev/null
@@ -1,604 +0,0 @@
-=======================
-Using the Karbor Client
-=======================
-
-Environment Variables
----------------------
-
-To use the cinder or karbor clients, you must first set the Keystone
-authentication variables.
-
-.. code-block:: console
-
-    export OS_USERNAME=admin
-    export OS_PASSWORD=123456
-    export OS_TENANT_NAME=admin
-    export OS_AUTH_URL=http://10.229.47.230/identity/
-
-Provider
---------
-
-List the providers
-
-.. code-block:: console
-
-    karbor provider-list
-    +--------------------------------------+-------------------+--------------------------------------------------------------------------------------+
-    | Id                                   | Name              | Description                                                                          |
-    +--------------------------------------+-------------------+--------------------------------------------------------------------------------------+
-    | b766f37c-d011-4026-8228-28730d734a3f | No-Op Provider    | This provider does nothing for each protect and restore operation. Used for testing  |
-    | cf56bd3e-97a7-4078-b6d5-f36246333fd9 | OS Infra Provider | This provider uses OpenStack's own services (swift, cinder) as storage               |
-    | e4008868-be97-492c-be41-44e50ef2e16f | EISOO Provider    | This provider provides data protection for applications with EISOO AnyBackup         |
-    +--------------------------------------+-------------------+--------------------------------------------------------------------------------------+
-
-Show the provider information
-
-..
code-block:: console - - karbor provider-show cf56bd3e-97a7-4078-b6d5-f36246333fd9 - +----------------------+---------------------------------------------------------------------------------------------+ - | Property | Value | - +----------------------+---------------------------------------------------------------------------------------------+ - | description | This provider uses OpenStack's own services (swift, cinder) as storage | - | extended_info_schema | { | - | | "options_schema": { | - | | "OS::Cinder::Volume": { | - | | "properties": { | - | | "backup_mode": { | - | | "default": "auto", | - | | "description": "The backup mode.", | - | | "enum": [ | - | | "full", | - | | "incremental", | - | | "auto" | - | | ], | - | | "title": "Backup Mode", | - | | "type": "string" | - | | }, | - | | "backup_name": { | - | | "description": "The name of the backup.", | - | | "title": "Backup Name", | - | | "type": "string" | - | | }, | - | | "container": { | - | | "description": "The container which been chosen.", | - | | "title": "Container", | - | | "type": "string" | - | | }, | - | | "description": { | - | | "description": "The description of the volume.", | - | | "title": "Description", | - | | "type": "string" | - | | }, | - | | "force": { | - | | "default": false, | - | | "description": "Whether to backup, even if the volumeis attached", | - | | "title": "Force", | - | | "type": "boolean" | - | | } | - | | }, | - | | "required": [ | - | | "backup_name", | - | | "backup_mode", | - | | "container", | - | | "force" | - | | ], | - | | "title": "Cinder Protection Options", | - | | "type": "object" | - | | }, | - | | "OS::Glance::Image": { | - | | "properties": { | - | | "backup_name": { | - | | "default": null, | - | | "description": "The name of the backup.", | - | | "title": "Backup Name", | - | | "type": "string" | - | | } | - | | }, | - | | "required": [], | - | | "title": "Image Protection Options", | - | | "type": "object" | - | | }, | - | | "OS::Nova::Server": { | - | | "properties": {}, | - | | "required": [], | - | | "title": "Server Protection Options", | - | | "type": "object" | - | | } | - | | }, | - | | "restore_schema": { | - | | "OS::Cinder::Volume": { | - | | "properties": { | - | | "restore_description": { | - | | "default": null, | - | | "description": "The description of the restored volume.", | - | | "title": "Restore Description", | - | | "type": "string" | - | | }, | - | | "restore_name": { | - | | "default": null, | - | | "description": "The name of the restored volume.", | - | | "title": "Restore Name", | - | | "type": "string" | - | | }, | - | | "volume_id": { | - | | "description": "The target volume ID to restore to.", | - | | "title": "Volume ID", | - | | "type": "string" | - | | } | - | | }, | - | | "title": "Cinder Protection Restore", | - | | "type": "object" | - | | }, | - | | "OS::Glance::Image": { | - | | "properties": { | - | | "restore_name": { | - | | "description": "The name of the restore image", | - | | "title": "Restore Image Name", | - | | "type": "string" | - | | } | - | | }, | - | | "required": [ | - | | "backup_name" | - | | ], | - | | "title": "Image Protection Restore", | - | | "type": "object" | - | | }, | - | | "OS::Nova::Server": { | - | | "properties": { | - | | "restore_name": { | - | | "description": "The name of the restore server", | - | | "title": "Restore Server Name", | - | | "type": "string" | - | | } | - | | }, | - | | "required": [ | - | | "restore_name" | - | | ], | - | | "title": "Server Protection Restore", | - | | "type": "object" 
| - | | } | - | | }, | - | | "saved_info_schema": { | - | | "OS::Cinder::Volume": { | - | | "properties": { | - | | "fail_reason": { | - | | "description": "The reason for the failure status of the backup.", | - | | "title": "Fail Reason", | - | | "type": "string" | - | | }, | - | | "is_incremental": { | - | | "description": "The type of the backup, True is incremental and False is full.", | - | | "title": "Is Incremental", | - | | "type": "boolean" | - | | }, | - | | "name": { | - | | "description": "The name for this backup.", | - | | "title": "Name", | - | | "type": "string" | - | | }, | - | | "progress": { | - | | "constraint": { | - | | "max": 1, | - | | "min": 0 | - | | }, | - | | "description": "The current operation progress for this backup.", | - | | "title": "Progress", | - | | "type": "number" | - | | }, | - | | "size": { | - | | "description": "The size of the backup, in GB.", | - | | "title": "Size", | - | | "type": "integer" | - | | }, | - | | "status": { | - | | "description": "The backup status, such as available.", | - | | "enum": [ | - | | "creating", | - | | "available", | - | | "deleting", | - | | "error", | - | | "restoring", | - | | "error_restoring" | - | | ], | - | | "title": "Status", | - | | "type": "string" | - | | }, | - | | "volume_id": { | - | | "description": "The ID of the volume from which the backup was created.", | - | | "title": "Volume ID", | - | | "type": "string" | - | | } | - | | }, | - | | "required": [ | - | | "name", | - | | "status", | - | | "progress", | - | | "fail_reason", | - | | "size", | - | | "volume_id" | - | | ], | - | | "title": "Cinder Protection Saved Info", | - | | "type": "object" | - | | }, | - | | "OS::Glance::Image": { | - | | "properties": { | - | | "image_metadata": { | - | | "description": "To save disk_format and container_format", | - | | "title": "Image Metadata", | - | | "type": "image" | - | | } | - | | }, | - | | "required": [ | - | | "image_metadata" | - | | ], | - | | "title": "Image Protection Saved Info", | - | | "type": "object" | - | | }, | - | | "OS::Nova::Server": { | - | | "properties": { | - | | "attach_metadata": { | - | | "description": "The devices of attached volumes", | - | | "title": "Attached Volume Metadata", | - | | "type": "object" | - | | }, | - | | "snapshot_metadata": { | - | | "description": "The metadata of snapshot", | - | | "title": "Snapshot Metadata", | - | | "type": "object" | - | | } | - | | }, | - | | "required": [ | - | | "attached_metadata", | - | | "snapshot_metadata" | - | | ], | - | | "title": "Server Protection Saved Info", | - | | "type": "object" | - | | } | - | | } | - | | } | - | id | cf56bd3e-97a7-4078-b6d5-f36246333fd9 | - | name | OS Infra Provider | - +----------------------+---------------------------------------------------------------------------------------------+ - -Protectables ------------- - -Use cinder client to create volumes - -.. 
code-block:: console - - cinder create 1 --name volume1 - cinder create 1 --name volume2 - cinder list - +--------------------------------------+-----------+---------+------+-------------+----------+-------------+ - | ID | Status | Name | Size | Volume Type | Bootable | Attached to | - +--------------------------------------+-----------+---------+------+-------------+----------+-------------+ - | 12e2abc6-f20b-430d-9b36-1a6befd23b6c | available | volume2 | 1 | lvmdriver-1 | false | | - | 700495ee-38e6-41a0-963f-f3f9a24c0f75 | available | volume1 | 1 | lvmdriver-1 | false | | - +--------------------------------------+-----------+---------+------+-------------+----------+-------------+ - -List the protectable resources - -.. code-block:: console - - karbor protectable-list - +-----------------------+ - | Protectable type | - +-----------------------+ - | OS::Cinder::Volume | - | OS::Glance::Image | - | OS::Keystone::Project | - | OS::Nova::Server | - +-----------------------+ - karbor protectable-show OS::Nova::Server - +-----------------+-----------------------------------------------+ - | Property | Value | - +-----------------+-----------------------------------------------+ - | dependent_types | [u'OS::Cinder::Volume', u'OS::Glance::Image'] | - | name | OS::Nova::Server | - +-----------------+-----------------------------------------------+ - karbor protectable-list-instances OS::Cinder::Volume - +--------------------------------------+--------------------+---------------------+ - | Id | Type | Dependent resources | - +--------------------------------------+--------------------+---------------------+ - | 12e2abc6-f20b-430d-9b36-1a6befd23b6c | OS::Cinder::Volume | [] | - | 700495ee-38e6-41a0-963f-f3f9a24c0f75 | OS::Cinder::Volume | [] | - +--------------------------------------+--------------------+---------------------+ - karbor protectable-show-instance OS::Cinder::Volume 12e2abc6-f20b-430d-9b36-1a6befd23b6c - +---------------------+--------------------------------------+ - | Property | Value | - +---------------------+--------------------------------------+ - | dependent_resources | [] | - | id | 12e2abc6-f20b-430d-9b36-1a6befd23b6c | - | name | volume2 | - | type | OS::Cinder::Volume | - +---------------------+--------------------------------------+ - -Plans ------ -Create a protection plan with a provider and resources - -.. code-block:: console - - karbor plan-create 'OS volumes protection plan.' 'cf56bd3e-97a7-4078-b6d5-f36246333fd9' '12e2abc6-f20b-430d-9b36-1a6befd23b6c'='OS::Cinder::Volume'='volume2','700495ee-38e6-41a0-963f-f3f9a24c0f75'='OS::Cinder::Volume'='volume1' - +-------------+----------------------------------------------------+ - | Property | Value | - +-------------+----------------------------------------------------+ - | description | None | - | id | ef8b83f3-d0c4-48ec-8949-5c72bbf14103 | - | name | OS volumes protection plan. | - | parameters | {} | - | provider_id | cf56bd3e-97a7-4078-b6d5-f36246333fd9 | - | resources | [ | - | | { | - | | "id": "12e2abc6-f20b-430d-9b36-1a6befd23b6c", | - | | "name": "volume2", | - | | "type": "OS::Cinder::Volume" | - | | }, | - | | { | - | | "id": "700495ee-38e6-41a0-963f-f3f9a24c0f75", | - | | "name": "volume1", | - | | "type": "OS::Cinder::Volume" | - | | } | - | | ] | - | status | suspended | - +-------------+----------------------------------------------------+ - -Checkpoints ------------ -Execute a protect operation manually with a plan - -.. 
code-block:: console - - karbor checkpoint-create cf56bd3e-97a7-4078-b6d5-f36246333fd9 ef8b83f3-d0c4-48ec-8949-5c72bbf14103 - +-----------------+------------------------------------------------------+ - | Property | Value | - +-----------------+------------------------------------------------------+ - | created_at | None | - | extra_info | {"created_by": "manual"} | - | id | 80f6154f-cc43-441f-8841-35ae23e17f4f | - | project_id | 31478a6f980d4e73a3bdac3ad04a3605 | - | protection_plan | { | - | | "id": "ef8b83f3-d0c4-48ec-8949-5c72bbf14103", | - | | "name": "OS volumes protection plan.", | - | | "resources": [ | - | | { | - | | "id": "12e2abc6-f20b-430d-9b36-1a6befd23b6c", | - | | "name": "volume2", | - | | "type": "OS::Cinder::Volume" | - | | }, | - | | { | - | | "id": "700495ee-38e6-41a0-963f-f3f9a24c0f75", | - | | "name": "volume1", | - | | "type": "OS::Cinder::Volume" | - | | } | - | | ] | - | | } | - | resource_graph | None | - | status | protecting | - +-----------------+------------------------------------------------------+ - # check the protect result - cinder backup-list - +--------------------------------------+--------------------------------------+-----------+------+------+--------------+---------------+ - | ID | Volume ID | Status | Name | Size | Object Count | Container | - +--------------------------------------+--------------------------------------+-----------+------+------+--------------+---------------+ - | becf53cd-12f8-424d-9b08-54fbffe9495a | 700495ee-38e6-41a0-963f-f3f9a24c0f75 | available | - | 1 | 22 | volumebackups | - | c35317f4-df2a-4c7d-9f36-6495c563a5bf | 12e2abc6-f20b-430d-9b36-1a6befd23b6c | available | - | 1 | 22 | volumebackups | - +--------------------------------------+--------------------------------------+-----------+------+------+--------------+---------------+ - karbor checkpoint-show cf56bd3e-97a7-4078-b6d5-f36246333fd9 80f6154f-cc43-441f-8841-35ae23e17f4f - +-----------------+-----------------------------------------------------------+ - | Property | Value | - +-----------------+-----------------------------------------------------------+ - | created_at | 2017-02-13 | - | extra_info | {"created_by": "manual"} | - | id | 80f6154f-cc43-441f-8841-35ae23e17f4f | - | project_id | 31478a6f980d4e73a3bdac3ad04a3605 | - | protection_plan | { | - | | "id": "ef8b83f3-d0c4-48ec-8949-5c72bbf14103", | - | | "name": "OS volumes protection plan.", | - | | "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", | - | | "resources": [ | - | | { | - | | "id": "12e2abc6-f20b-430d-9b36-1a6befd23b6c", | - | | "name": "volume2", | - | | "type": "OS::Cinder::Volume" | - | | }, | - | | { | - | | "id": "700495ee-38e6-41a0-963f-f3f9a24c0f75", | - | | "name": "volume1", | - | | "type": "OS::Cinder::Volume" | - | | } | - | | ] | - | | } | - | resource_graph | [ | - | | { | - | | "0x0": [ | - | | "OS::Cinder::Volume", | - | | "700495ee-38e6-41a0-963f-f3f9a24c0f75", | - | | "volume1" | - | | ], | - | | "0x1": [ | - | | "OS::Cinder::Volume", | - | | "12e2abc6-f20b-430d-9b36-1a6befd23b6c", | - | | "volume2" | - | | ] | - | | }, | - | | [] | - | | ] | - | status | available | - +-----------------+-----------------------------------------------------------+ - -Restores --------- - -Execute a restore operation manually with a checkpoint id - -.. 
code-block:: console - - karbor restore-create cf56bd3e-97a7-4078-b6d5-f36246333fd9 80f6154f-cc43-441f-8841-35ae23e17f4f - +------------------+--------------------------------------+ - | Property | Value | - +------------------+--------------------------------------+ - | checkpoint_id | 80f6154f-cc43-441f-8841-35ae23e17f4f | - | id | f30cb640-594a-487b-9569-c26fd5861c95 | - | parameters | {} | - | project_id | 31478a6f980d4e73a3bdac3ad04a3605 | - | provider_id | cf56bd3e-97a7-4078-b6d5-f36246333fd9 | - | resources_reason | {} | - | resources_status | {} | - | restore_target | None | - | status | in_progress | - +------------------+--------------------------------------+ - karbor restore-show f30cb640-594a-487b-9569-c26fd5861c95 - +------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Property | Value | - +------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ - | checkpoint_id | 80f6154f-cc43-441f-8841-35ae23e17f4f | - | id | f30cb640-594a-487b-9569-c26fd5861c95 | - | parameters | {} | - | project_id | 31478a6f980d4e73a3bdac3ad04a3605 | - | provider_id | cf56bd3e-97a7-4078-b6d5-f36246333fd9 | - | resources_reason | {} | - | resources_status | {u'OS::Cinder::Volume#2b6e0055-bec0-41f5-85fa-a830a3684fd9': u'available', u'OS::Cinder::Volume#6c77fd44-c76b-400e-8aa4-97bce241b690': u'available'} | - | restore_target | None | - | status | success | - +------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ - cinder list - +--------------------------------------+-----------+---------------------------------------------------------------------------+------+-------------+----------+-------------+ - | ID | Status | Name | Size | Volume Type | Bootable | Attached to | - +--------------------------------------+-----------+---------------------------------------------------------------------------+------+-------------+----------+-------------+ - | 12e2abc6-f20b-430d-9b36-1a6befd23b6c | available | volume2 | 1 | lvmdriver-1 | false | | - | 2b6e0055-bec0-41f5-85fa-a830a3684fd9 | available | 80f6154f-cc43-441f-8841-35ae23e17f4f@12e2abc6-f20b-430d-9b36-1a6befd23b6c | 1 | lvmdriver-1 | false | | - | 6c77fd44-c76b-400e-8aa4-97bce241b690 | available | 80f6154f-cc43-441f-8841-35ae23e17f4f@700495ee-38e6-41a0-963f-f3f9a24c0f75 | 1 | lvmdriver-1 | false | | - | 700495ee-38e6-41a0-963f-f3f9a24c0f75 | available | volume1 | 1 | lvmdriver-1 | false | | - +--------------------------------------+-----------+---------------------------------------------------------------------------+------+-------------+----------+-------------+ - -Checkpoint Delete ------------------ - -Execute a delete operation manually with a checkpoint id - -.. 
code-block:: console
-
-    cinder backup-list
-    +--------------------------------------+--------------------------------------+-----------+------+------+--------------+---------------+
-    | ID                                   | Volume ID                            | Status    | Name | Size | Object Count | Container     |
-    +--------------------------------------+--------------------------------------+-----------+------+------+--------------+---------------+
-    | becf53cd-12f8-424d-9b08-54fbffe9495a | 700495ee-38e6-41a0-963f-f3f9a24c0f75 | available | -    | 1    | 22           | volumebackups |
-    | c35317f4-df2a-4c7d-9f36-6495c563a5bf | 12e2abc6-f20b-430d-9b36-1a6befd23b6c | available | -    | 1    | 22           | volumebackups |
-    +--------------------------------------+--------------------------------------+-----------+------+------+--------------+---------------+
-
-    karbor checkpoint-delete cf56bd3e-97a7-4078-b6d5-f36246333fd9 80f6154f-cc43-441f-8841-35ae23e17f4f
-
-    cinder backup-list
-    +----+-----------+--------+------+------+--------------+-----------+
-    | ID | Volume ID | Status | Name | Size | Object Count | Container |
-    +----+-----------+--------+------+------+--------------+-----------+
-    +----+-----------+--------+------+------+--------------+-----------+
-
-Checkpoint Reset State
-----------------------
-
-Execute a reset state operation manually with a checkpoint id
-
-.. code-block:: console
-
-    karbor checkpoint-reset-state cf56bd3e-97a7-4078-b6d5-f36246333fd9 80f6154f-cc43-441f-8841-35ae23e17f4f --available
-
-    # check the checkpoint status
-    karbor checkpoint-show cf56bd3e-97a7-4078-b6d5-f36246333fd9 80f6154f-cc43-441f-8841-35ae23e17f4f
-    +-----------------+-----------------------------------------------------------+
-    | Property        | Value                                                     |
-    +-----------------+-----------------------------------------------------------+
-    | created_at      | 2017-02-13                                                |
-    | extra_info      | {"created_by": "manual"}                                  |
-    | id              | 80f6154f-cc43-441f-8841-35ae23e17f4f                      |
-    | project_id      | 31478a6f980d4e73a3bdac3ad04a3605                          |
-    | protection_plan | {                                                         |
-    |                 |     "id": "ef8b83f3-d0c4-48ec-8949-5c72bbf14103",         |
-    |                 |     "name": "OS volumes protection plan.",                |
-    |                 |     "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",|
-    |                 |     "resources": [                                        |
-    |                 |         {                                                 |
-    |                 |             "id": "12e2abc6-f20b-430d-9b36-1a6befd23b6c", |
-    |                 |             "name": "volume2",                            |
-    |                 |             "type": "OS::Cinder::Volume"                  |
-    |                 |         },                                                |
-    |                 |         {                                                 |
-    |                 |             "id": "700495ee-38e6-41a0-963f-f3f9a24c0f75", |
-    |                 |             "name": "volume1",                            |
-    |                 |             "type": "OS::Cinder::Volume"                  |
-    |                 |         }                                                 |
-    |                 |     ]                                                     |
-    |                 | }                                                         |
-    | resource_graph  | [                                                         |
-    |                 |     {                                                     |
-    |                 |         "0x0": [                                          |
-    |                 |             "OS::Cinder::Volume",                         |
-    |                 |             "700495ee-38e6-41a0-963f-f3f9a24c0f75",       |
-    |                 |             "volume1"                                     |
-    |                 |         ],                                                |
-    |                 |         "0x1": [                                          |
-    |                 |             "OS::Cinder::Volume",                         |
-    |                 |             "12e2abc6-f20b-430d-9b36-1a6befd23b6c",       |
-    |                 |             "volume2"                                     |
-    |                 |         ]                                                 |
-    |                 |     },                                                    |
-    |                 |     []                                                    |
-    |                 | ]                                                         |
-    | status          | available                                                 |
-    +-----------------+-----------------------------------------------------------+
-
-Scheduled Operations
---------------------
-
-Execute a protect operation automatically with a scheduler
-
-..
code-block:: console - - karbor trigger-create 'My Trigger' 'time' "pattern"="BEGIN:VEVENT\nRRULE:FREQ=MINUTELY;INTERVAL=5;\nEND:VEVENT","format"="calendar" - +------------+------------------------------------------------------------------------------+ - | Property | Value | - +------------+------------------------------------------------------------------------------+ - | id | b065836f-6485-429d-b12c-e04395c5f58e | - | name | My Trigger | - | properties | { | - | | "format": "calendar", | - | | "pattern": "BEGIN:VEVENT\\nRRULE:FREQ=MINUTELY;INTERVAL=5;\\nEND:VEVENT", | - | | "start_time": "2017-03-02 22:56:42" | - | | } | - | type | time | - +------------+------------------------------------------------------------------------------+ - karbor scheduledoperation-create 'Protect with My Trigger' protect b065836f-6485-429d-b12c-e04395c5f58e "plan_id"="ca572b42-6d35-4d81-bb4e-c9b100a3387a","provider_id"="cf56bd3e-97a7-4078-b6d5-f36246333fd9" - +----------------------+---------------------------------------------------------+ - | Property | Value | - +----------------------+---------------------------------------------------------+ - | description | None | - | enabled | True | - | id | 2ebcf7cc-d8fe-4a70-af71-8a13f20556fb | - | name | PMT | - | operation_definition | { | - | | "plan_id": "ca572b42-6d35-4d81-bb4e-c9b100a3387a", | - | | "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9" | - | | } | - | operation_type | protect | - | trigger_id | b065836f-6485-429d-b12c-e04395c5f58e | - +----------------------+---------------------------------------------------------+ - karbor checkpoint-list cf56bd3e-97a7-4078-b6d5-f36246333fd9 - +--------------------------------------+----------------------------------+-----------+-----------------------------------------------------------+------------+ - | Id | Project id | Status | Protection plan | Created at | - +--------------------------------------+----------------------------------+-----------+-----------------------------------------------------------+------------+ - | 92e74f0c-8519-4928-9bd5-0039e0fe92b0 | 9632a0c585c94d708c57a83190913c76 | available | { | 2017-03-03 | - | | | | "id": "ca572b42-6d35-4d81-bb4e-c9b100a3387a", | | - | | | | "name": "Plan1", | | - | | | | "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", | | - | | | | "resources": [ | | - | | | | { | | - | | | | "id": "d72e83c2-4083-4cc7-9283-4578332732ab", | | - | | | | "name": "Volume1", | | - | | | | "type": "OS::Cinder::Volume" | | - | | | | } | | - | | | | ] | | - | | | | } | | - +--------------------------------------+----------------------------------+-----------+-----------------------------------------------------------+------------+ diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst deleted file mode 100644 index 040a7e59..00000000 --- a/doc/source/admin/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -====================== - Administration Guide -====================== - -.. 
toctree::
-   :maxdepth: 2
-
-   provider
-   client
diff --git a/doc/source/admin/provider.rst b/doc/source/admin/provider.rst
deleted file mode 100644
index 03a0afa0..00000000
--- a/doc/source/admin/provider.rst
+++ /dev/null
@@ -1,51 +0,0 @@
-=============================
-Configure Protection Provider
-=============================
-
-Provider Configuration Files
-----------------------------
-
-Before starting the karbor-protection service, the admin needs to configure
-Protection Providers in /etc/karbor/providers.d/
-
-Each file must contain the `provider` section with the following fields:
-
-* name - the name of the provider
-* description - one sentence describing the provider
-* id - a unique id for the provider; it should be generated with uuid4
-* plugin - multiple plugin statements, one for each enabled protection plugin.
-  Available options are under the `karbor.protections` namespace entry point.
-* bank - the bank plugin used for this provider.
-  Available options are under the `karbor.protections` namespace entry point.
-* enabled - true or false; whether to load the provider or not
-
-Each protection plugin and the bank require additional configuration. Each
-plugin defines its own section and configuration options.
-
-The "OpenStack Infra Provider" is the default provider; it can be used as-is,
-removed, or serve as a base for other providers.
-
-Example
-~~~~~~~
-
-.. code-block:: ini
-
-    [provider]
-    name = OS Infra Provider
-    description = This provider uses OpenStack's own services (swift, cinder) as storage
-    id = cf56bd3e-97a7-4078-b6d5-f36246333fd9
-    plugin=karbor-volume-protection-plugin
-    bank=karbor-swift-bank-plugin
-    enabled = True
-
-    [swift_client]
-    swift_auth_url=http://10.229.47.230/identity/
-    swift_user=admin
-    swift_key=123456
-    swift_tenant_name=admin
-
-    [swift_bank_plugin]
-    lease_expire_window=120
-    lease_renew_window=100
-    lease_validity_window=100
-
diff --git a/doc/source/api-service-class-diagram.pu b/doc/source/api-service-class-diagram.pu
deleted file mode 100644
index cd7ef0ba..00000000
--- a/doc/source/api-service-class-diagram.pu
+++ /dev/null
@@ -1,57 +0,0 @@
-@startuml
-
-title API Service Class Diagram
-
-class wsgi.Controller{
-
-}
-
-
-class ProvidersController extends wsgi.Controller{
-    +index(self, req):[]Provider
-    +show(self, req, provider_id:String):Provider
-}
-
-class CheckpointsController extends wsgi.Controller{
-    +index(self, req, provider_id:String):[]Checkpoint
-    +create(self, req, body:JSON, provider_id:String):Checkpoint
-    +show(self, req, provider_id:String, checkpoint_id:String):Checkpoint
-    +delete(self, req, provider_id:String, checkpoint_id:String):void
-}
-
-class ProtectablesController extends wsgi.Controller{
-    +index(self, req):[]Protectable
-    +show(self, req, protectable_type:String):[]Protectable
-    +index_instances(self, req, protectable_type:String):[]Resource
-}
-
-class PlansController extends wsgi.Controller{
-    +create(self, req, body:JSON):Plan
-    +index(self, req):[]Plan
-    +show(self, req, id:String):Plan
-    +update(self, req, id:String):Plan
-    +delete(self, req, id:String):void
-}
-
-class ScheduledOperationsController extends wsgi.Controller{
-    +create(self, req, body:JSON):ScheduledOperation
-    +index(self, req):[]ScheduledOperation
-    +show(self, req, id:String):ScheduledOperation
-    +delete(self, req, id:String):void
-}
-
-
-class RestorationsController extends wsgi.Controller{
-    +create(self, req, body:JSON):Restoration
-    +index(self, req):[]Restoration
-    +show(self, req, id:String):Restoration
-}
-
-class TriggersController
extends wsgi.Controller{ - +create(self, req, body:JSON):Trigger - +index(self, req):[]Trigger - +show(self, req, id:String):Trigger - +delete(self, req, id:String):void -} - -@enduml diff --git a/doc/source/bank_plugins.rst b/doc/source/bank_plugins.rst deleted file mode 100644 index 26f9de0d..00000000 --- a/doc/source/bank_plugins.rst +++ /dev/null @@ -1,28 +0,0 @@ -Bank Plugins ------------- - -Swift -""""" - -.. autoclass:: karbor.services.protection.bank_plugins.swift_bank_plugin.SwiftBankPlugin - :noindex: - :members: - :show-inheritance: - - -Local Filesystem -"""""""""""""""" - -.. autoclass:: karbor.services.protection.bank_plugins.file_system_bank_plugin.FileSystemBankPlugin - :noindex: - :members: - :show-inheritance: - - -S3 -"" - -.. autoclass:: karbor.services.protection.bank_plugins.s3_bank_plugin.S3BankPlugin - :noindex: - :members: - :show-inheritance: diff --git a/doc/source/cli/index.rst b/doc/source/cli/index.rst deleted file mode 100644 index b46926f6..00000000 --- a/doc/source/cli/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -======================== -Karbor CLI Documentation -======================== - -In this section you will find information on Karbor's command line -interface. - -.. toctree:: - :maxdepth: 1 - - karbor-status diff --git a/doc/source/cli/karbor-status.rst b/doc/source/cli/karbor-status.rst deleted file mode 100644 index 0ef7946e..00000000 --- a/doc/source/cli/karbor-status.rst +++ /dev/null @@ -1,83 +0,0 @@ -============= -karbor-status -============= - ----------------------------------------- -CLI interface for Karbor status commands ----------------------------------------- - -Synopsis -======== - -:: - - karbor-status [] - -Description -=========== - -:program:`karbor-status` is a tool that provides routines for checking the -status of a Karbor deployment. - -Options -======= - -The standard pattern for executing a :program:`karbor-status` command is:: - - karbor-status [] - -Run without arguments to see a list of available command categories:: - - karbor-status - -Categories are: - -* ``upgrade`` - -Detailed descriptions are below: - -You can also run with a category argument such as ``upgrade`` to see a list of -all commands in that category:: - - karbor-status upgrade - -These sections describe the available categories and arguments for -:program:`karbor-status`. - -Upgrade -~~~~~~~ - -.. _karbor-status-checks: - -``karbor-status upgrade check`` - Performs a release-specific readiness check before restarting services with - new code. For example, missing or changed configuration options, - incompatible object states, or other conditions that could lead to - failures while upgrading. - - **Return Codes** - - .. list-table:: - :widths: 20 80 - :header-rows: 1 - - * - Return code - - Description - * - 0 - - All upgrade readiness checks passed successfully and there is nothing - to do. - * - 1 - - At least one check encountered an issue and requires further - investigation. This is considered a warning but the upgrade may be OK. - * - 2 - - There was an upgrade status check failure that needs to be - investigated. This should be considered something that stops an - upgrade. - * - 255 - - An unexpected error occurred. - - **History of Checks** - - **x.x.x (Stein)** - - * Sample check to be filled in with checks as they are added in Stein. 
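The return-code table above maps directly onto automation. The following is a minimal illustrative sketch (not part of the deleted tree) that wraps the documented ``karbor-status upgrade check`` command in a pre-upgrade script; only the command name and its return codes are taken from the guide above, everything else is a hypothetical example.

.. code-block:: python

    import subprocess

    # Documented return codes of `karbor-status upgrade check` (see table above).
    SEVERITY = {
        0: "success: all upgrade readiness checks passed",
        1: "warning: at least one check requires further investigation",
        2: "failure: a check failed; do not proceed with the upgrade",
        255: "error: an unexpected error occurred",
    }

    def upgrade_is_safe() -> bool:
        """Run the readiness check and return True only on a clean pass."""
        result = subprocess.run(["karbor-status", "upgrade", "check"])
        print(SEVERITY.get(result.returncode, "unknown return code"))
        return result.returncode == 0

    if __name__ == "__main__":
        raise SystemExit(0 if upgrade_is_safe() else 1)

A deployment pipeline could gate the service restart on this exit status, treating code 1 as a soft warning and code 2 as a hard stop.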
diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100755 index 0692d713..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,112 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -sys.path.insert(0, os.path.abspath('../..')) -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'sphinxcontrib.apidoc', - # 'sphinx.ext.intersphinx', - 'openstackdocstheme', - 'oslo_config.sphinxext', - 'oslo_config.sphinxconfiggen', - 'oslo_policy.sphinxext', - 'oslo_policy.sphinxpolicygen', - 'reno.sphinxext' -] - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable -modindex_common_prefix = [ - 'karbor.', - 'karbor.services.', -] -exclude_patterns = [ - 'api/karbor.tests.*', - 'api/karbor.wsgi.*', - 'api/karbor.services.protection.bank_plugins.*', - 'api/karbor.services.protection.protectable_plugins.*', - 'api/karbor.services.protection.protection_plugins.*', -] - -config_generator_config_file = [ - ('../../etc/oslo-config-generator/karbor.conf', - '_static/karbor'), -] - -policy_generator_config_file = '../../etc/karbor-policy-generator.conf' -sample_policy_basename = '_static/karbor' - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'karbor' -copyright = u'2013, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- sphinxcontrib.apidoc configuration -------------------------------------- - -apidoc_module_dir = '../../karbor' -apidoc_output_dir = 'contributor/api' -apidoc_excluded_paths = [ - 'tests', - 'wsgi', - 'services/protection/bank_plugins', - 'services/protection/protectable_plugins', - 'services/protection/protection_plugins', -] - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -html_theme = 'openstackdocs' -# html_static_path = ['static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). 
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]

# -- Options for openstackdocstheme -------------------------------------------
repository_name = 'openstack/karbor'
bug_project = project.lower()
bug_tag = ''
diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst deleted file mode 100644 index deec8ad2..00000000 --- a/doc/source/configuration/index.rst +++ /dev/null @@ -1,14 +0,0 @@
.. _configuring:

===================
Configuration Guide
===================

This section provides a list of all possible options for each configuration
file.

.. toctree::
   :glob:
   :maxdepth: 1

   *
diff --git a/doc/source/configuration/karbor.rst b/doc/source/configuration/karbor.rst deleted file mode 100644 index 29a4995d..00000000 --- a/doc/source/configuration/karbor.rst +++ /dev/null @@ -1,8 +0,0 @@
.. _karbor.conf:

-----------
karbor.conf
-----------

.. show-options::
   :config-file: etc/oslo-config-generator/karbor.conf
diff --git a/doc/source/configuration/policy.rst b/doc/source/configuration/policy.rst deleted file mode 100644 index 06005139..00000000 --- a/doc/source/configuration/policy.rst +++ /dev/null @@ -1,14 +0,0 @@
.. _karbor-policy-generator.conf:

====================
Policy configuration
====================

Configuration
~~~~~~~~~~~~~

The following is an overview of all available policies in Karbor. A sample
policy configuration file is rendered below.

.. show-policy::
   :config-file: ../../etc/karbor-policy-generator.conf
diff --git a/doc/source/contributor/architecture.rst b/doc/source/contributor/architecture.rst deleted file mode 100644 index cd17d89f..00000000 --- a/doc/source/contributor/architecture.rst +++ /dev/null @@ -1,136 +0,0 @@
============
Architecture
============

High Level Architecture
=======================
.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/high_level_architecture.png
   :alt: Solution Overview
   :width: 600
   :align: center

The system is built from independent services and a scalable *Workflow
engine* that ties them together:

API Service
===========

.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/karbor-api.png
   :width: 600

These top-level north-bound APIs expose Application Data Protection services
to the Karbor user.

The purpose of the services is to maximize flexibility and accommodate
(hopefully) any kind of protection for any type of resource, whether it is a
basic OpenStack resource (such as a VM, Volume, Image, etc.) or some ancillary
resource within an application system that is not managed in OpenStack (such
as a hardware device, an external database, etc.).


Resource (Protectable) API
--------------------------

Enables the Karbor user to access information about which resource types are
protectable (i.e. can be protected by Karbor). In addition, it enables the
user to get additional information on each resource type, such as a list of
actual instances and their dependencies.

Provider API
------------

Enables the Karbor user to list available providers and get parameters and
result schema super-set for all plugins of a specific Provider.

Plan API
--------

This API enables the Karbor user to access the protection Plan registry and
do the following operations:

- Plan CRUD.
- List Plans.
- Starting and suspending of plans.
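To make the Plan API concrete, here is a hedged sketch of creating and listing
plans over the north-bound REST API with Python's ``requests`` library. The
endpoint layout follows the install guide in this documentation
(``http://controller:8799/v1/%(project_id)s``); the token and all IDs are
placeholders, and the exact request body should be verified against the Karbor
API reference.

.. code-block:: python

    import requests

    # Placeholder values, not from a real deployment.
    token = "<keystone-token>"
    project_id = "690ccee85834425e973258252e0da888"
    endpoint = "http://controller:8799/v1/%s" % project_id
    headers = {"X-Auth-Token": token}

    # A plan names the provider and the resources it should protect.
    plan = {
        "plan": {
            "name": "Plan1",
            "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",
            "resources": [{
                "id": "286a43e9-3899-4983-965f-d8b1faef5e58",
                "type": "OS::Cinder::Volume",
                "name": "Volume1",
            }],
            "parameters": {},
        }
    }

    # Plan CRUD: create one plan, then list all plans for the project.
    resp = requests.post(endpoint + "/plans", json=plan, headers=headers)
    resp.raise_for_status()
    print(requests.get(endpoint + "/plans", headers=headers).json())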
Automatic Operation API
-----------------------

This API enables the Karbor user to manage protection Operations:

- Create a checkpoint for a given Protection Plan.
- Delete unneeded checkpoints from the provider.
- Query the status of a given Operation ID.

Checkpoint API
--------------

This API enables the Karbor user to access and manage checkpoints stored in
the protection provider:

- List all checkpoints given a Bank ID.
- Show information on a given checkpoint ID.
- Delete a checkpoint.
- Create a checkpoint.

Restore API
-----------

This API enables the Karbor user to restore a checkpoint onto a restore
target:

- Create a restored system from a checkpoint.

Operation Engine Service
========================

This subsystem is responsible for scheduling and orchestrating the execution
of *Protection Plans*.

The implementation can be replaced by any other external solution, since it
uses only functions that are available through the north-bound API.

Once an entity is created, it can be tracked through the north-bound API, so
monitoring the operations is independent of the scheduler.

It is responsible for the automatic execution of specific operations and for
tracking them.

Automatic Operation
-------------------

Automatic operations are the core of the scheduler. They define higher-level
automatic logic. A simple scenario is a set of scheduled operations that
invoke basic APIs when a specific trigger fires. More complex scheduling
policies that invoke multiple north-bound basic APIs will also be available.

Trigger Engine
--------------

This sub-component of the schedule service is responsible for generating
triggers, which begin the execution of the Plan Orchestration.

Triggers can be based on a timer or an event collector, depending on the
implementation.

In the first Karbor reference implementation, the trigger engine will only
provide time-based triggers.

Scheduled Operation
-------------------

This sub-component of the schedule service is responsible for holding the
mapping between a trigger and operation(s).

Protection Service
==================

This subsystem is responsible for handling the following tasks:

- Operation Execution
- Protection Provider management

WorkFlow Engine
---------------

This pluggable component is responsible for executing and orchestrating the
flow of the plan across all protection providers.
diff --git a/doc/source/contributor/bank_plugins_guide.rst b/doc/source/contributor/bank_plugins_guide.rst deleted file mode 100644 index 16886bf8..00000000 --- a/doc/source/contributor/bank_plugins_guide.rst +++ /dev/null @@ -1,105 +0,0 @@
==============================
Bank Plugins Development Guide
==============================

.. contents:: :depth: 2

Introduction
============

This guide provides instructions on how to develop and use your bank plugins;
it also gives an overview of the existing bank plugins.

Before you read this document, it is recommended to:

#. Deploy an OpenStack environment with the latest Karbor version.
   See the `Karbor Installation Guide `_.


Overview
========

The bank plugin is responsible for persisting data between protect and
restore operations, and between different sites. This gives Karbor the
flexibility to store metadata in many locations: object stores such as Swift,
document stores such as MongoDB, relational databases such as MariaDB, etc.
So a simplified object-store interface is defined for the bank, so that most
backends can support saving the metadata and backup data of the checkpoints.

You can extend the functionality of Karbor's bank by implementing new bank
plugins.


Existing Plugins
================
.. toctree::
   :maxdepth: 1

   ../bank_plugins


Create a bank plugin
====================
The Karbor code-base has a python API that corresponds to the set of API
calls you must implement to be a Karbor Bank plugin. Within the source code
directory, look at karbor/services/protection/bank_plugin.py

A bank plugin usually consists of code to:

#. List: lists all the objects in the default container of the bank backend.

#. Create: creates or updates one object in the default container of the bank
   backend.

#. Get: gets one object from the default container of the bank backend.

#. Delete: deletes one object from the default container of the bank backend.


In order to tell whether a checkpoint (saved in the bank) is a zombie or not,
a lease mechanism is introduced to the bank plugin.

* `The detail about the bank plugin lease `_.

The bank plugin plays the role of lease client, while the bank backend server
(e.g. a Swift cluster) plays the role of lease server. The lease-related part
of a bank plugin should therefore consist of code to:

#. acquire_lease
   The bank plugin (lease client) uses this function to acquire a lease from
   the bank server (lease server). For Swift specifically, it creates a lease
   object in a Swift container and sets an expire_window for this lease.

#. renew_lease
   This function is called by each lease client in the background
   periodically.

#. check_lease_validity
   This function is used by the garbage collector (GC) to check whether the
   lease object still exists on the lease server side.


Add the configuration of the bank plugin

#. Adding the Plugin class module to the entry_points
   Add the bank plugin module name to the protection namespace of karbor in
   the entry_points section of the setup.cfg file::

       [entry_points]
       karbor.protections =
           karbor-swift-bank-plugin = karbor.services.protection.bank_plugins.swift_bank_plugin:SwiftBankPlugin
           karbor-fs-bank-plugin = karbor.services.protection.bank_plugins.file_system_bank_plugin:FileSystemBankPlugin


#. The bank plugin can then be used by Karbor. Before starting the
   karbor-protection service, the administrator needs to set the bank plugin
   entry point name in the provider configuration
   (/etc/karbor/providers.d/openstack-fs-bank.conf). Let us take the local
   file system bank plugin as an example::

       [provider]
       name = OS Infra Provider with local file system bank
       description = This provider uses local file system as the bank of karbor
       id = 6659007d-6f66-4a0f-9cb4-17d6aded0bb9
       plugin=karbor-volume-protection-plugin
       bank=karbor-fs-bank-plugin
       enabled=True
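As an illustration, here is a hedged, minimal sketch of a bank plugin that
keeps objects in an in-process dictionary. The base-class path follows this
guide, and the method names mirror the list/create/get/delete operations
described above; the authoritative names and signatures live in
karbor/services/protection/bank_plugin.py, so treat this as an outline. The
lease methods are stubbed out, since an in-memory store has no real lease
server.

.. code-block:: python

    from karbor.services.protection import bank_plugin  # path per this guide


    class InMemoryBankPlugin(bank_plugin.BankPlugin):
        """Toy bank plugin: stores checkpoint objects in a dict."""

        def __init__(self, config=None):
            super(InMemoryBankPlugin, self).__init__(config)
            self._store = {}

        def update_object(self, key, value):
            # "Create": create or update one object in the default container.
            self._store[key] = value

        def get_object(self, key):
            # "Get": fetch one object from the default container.
            return self._store[key]

        def list_objects(self, prefix=None, limit=None, marker=None,
                         sort_dir=None):
            # "List": list object keys, optionally filtered by prefix.
            keys = sorted(self._store)
            if prefix is not None:
                keys = [k for k in keys if k.startswith(prefix)]
            return keys[:limit] if limit else keys

        def delete_object(self, key):
            # "Delete": remove one object from the default container.
            self._store.pop(key, None)

        # Lease handling is a no-op here: with no external bank server,
        # the "lease" can never be lost.
        def acquire_lease(self):
            pass

        def renew_lease(self):
            pass

        def check_lease_validity(self):
            return True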
diff --git a/doc/source/contributor/devref/index.rst b/doc/source/contributor/devref/index.rst deleted file mode 100644 index 15a9c430..00000000 --- a/doc/source/contributor/devref/index.rst +++ /dev/null @@ -1,10 +0,0 @@
Developer Reference
===================

Code Reference
--------------

.. toctree::
   :maxdepth: 1

   ../api/modules
diff --git a/doc/source/contributor/devstack-installation.rst b/doc/source/contributor/devstack-installation.rst deleted file mode 100644 index e5a9b819..00000000 --- a/doc/source/contributor/devstack-installation.rst +++ /dev/null @@ -1,73 +0,0 @@
=====================
Devstack Installation
=====================

This type of installation is for developers or testing, and not for
production.

Single-node Devstack Installation
=================================
In order to install Karbor using Devstack on a single node, add the following
to your local.conf, under [[local|localrc]]:

.. code-block:: none

   enable_plugin karbor https://git.openstack.org/openstack/karbor master
   enable_plugin karbor-dashboard https://git.openstack.org/openstack/karbor-dashboard master
   enable_service karbor-api
   enable_service karbor-operationengine
   enable_service karbor-protection
   # Karbor Dashboard depends on Horizon
   enable_service karbor-dashboard

Dependencies
============

Heat
~~~~

.. code-block:: none

   enable_plugin heat https://git.openstack.org/openstack/heat master
   enable_service h-eng h-api h-api-cfn h-api-cw

Swift (recommended)
~~~~~~~~~~~~~~~~~~~

Essential for the basic protection provider.

.. code-block:: none

   SWIFT_REPLICAS=1
   SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
   SWIFT_DATA_DIR=$DEST/data
   enable_service s-proxy s-object s-container s-account

Cinder (optional)
~~~~~~~~~~~~~~~~~

.. code-block:: none

   enable_service cinder c-api c-vol c-sch c-bak

Glance (optional)
~~~~~~~~~~~~~~~~~

.. code-block:: none

   enable_service g-api g-reg

Nova (optional)
~~~~~~~~~~~~~~~

.. code-block:: none

   enable_service n-cpu n-api n-cond n-sch n-novnc n-cauth placement-api


Neutron (optional)
~~~~~~~~~~~~~~~~~~

.. code-block:: none

   enable_service neutron q-svc q-agt q-dhcp q-meta
   disable_service n-net
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst deleted file mode 100644 index 555c08b1..00000000 --- a/doc/source/contributor/index.rst +++ /dev/null @@ -1,45 +0,0 @@
=========================
Contributor Documentation
=========================

.. toctree::
   :maxdepth: 2

   architecture
   devstack-installation
   ../specs/index
   devref/index


Communication and Meetings
==========================

- Karbor Launchpad Link: \ https://launchpad.net/karbor
- Karbor Code Review: \ https://review.opendev.org/#/q/karbor+status:open,n,z
- Karbor Code Repository: \ https://github.com/openstack/karbor
- Karbor daily IRC Channel: #openstack-karbor
- Karbor IRC Meeting: every two weeks (on odd weeks) on Tuesday at 0900 UTC in
  #openstack-meeting (IRC webclient).

  - ICS File: http://eavesdrop.openstack.org/calendars/karbor-team-meeting.ics
  - Additional Info: https://wiki.openstack.org/wiki/Meetings/karbor

Plugins Development Guide
=========================
.. toctree::
   :maxdepth: 1

   bank_plugins_guide
   protectable_plugins_guide
   protection_plugins_guide

Additional References
=====================

- `OpenStack Tokyo Summit 2015 talk `_
- `OpenStack Austin Summit 2016 talk `_
- `OpenStack Barcelona Summit 2016 talk `_
- `OpenStack Boston Summit 2017 talk `_
- `Karbor overview slide `_
- `Karbor overview blog `_
diff --git a/doc/source/contributor/protectable_plugins_guide.rst b/doc/source/contributor/protectable_plugins_guide.rst deleted file mode 100644 index 5c7c713f..00000000 --- a/doc/source/contributor/protectable_plugins_guide.rst +++ /dev/null @@ -1,79 +0,0 @@
=====================================
Protectable Plugins Development Guide
=====================================

.. contents:: :depth: 2

Introduction
============

This guide provides instructions on how to develop and use your protectable
plugins; it also gives an overview of the existing protectable plugins.

Before you read this document, it is recommended to:

#. Deploy an OpenStack environment with the latest Karbor version.
   See the `Karbor Installation Guide `_.


Overview
========

The protectable plugin is responsible for implementing a type of protectable
element which Karbor can protect, most prominently OpenStack resources
(volume, project, server, etc.). An actual instance of a protectable element
is called a Resource. The protectable plugin for one resource type also
declares which resource types it depends on, and thereby defines the
dependencies between the different resource types in the default distribution
of Karbor.

You can extend the set of protectable resources that Karbor recognizes by
implementing new protectable plugins.


Existing Plugins
================
.. toctree::
   :maxdepth: 1

   ../protectable_plugins


Create a protectable plugin
===========================
The Karbor code-base has a python API that corresponds to the set of API
calls you must implement to be a Karbor protectable plugin. Within the source
code directory, look at karbor/services/protection/protectable_plugin.py

A protectable plugin must implement the following methods:

#. get_resource_type: returns the resource type that this plugin supports.

#. get_parent_resource_types: returns the possible parent resource types.

#. list_resources: lists resource instances of the type this plugin supports.

#. show_resource: shows detailed information about one resource.

#. get_dependent_resources: called once for every parent resource type.
   For example, the parent resource types for volume are "server" and
   "project", so get_dependent_resources will be called once for each.
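Below is a hedged, minimal sketch of a protectable plugin for a fictional
``OS::Example::Widget`` resource type that depends on projects. The base class
and the ``Resource`` helper are referenced as they appear in the Karbor tree
(karbor/services/protection/protectable_plugin.py and karbor/resource.py), but
the exact signatures may differ, so treat this as an outline rather than
working code.

.. code-block:: python

    from karbor import resource
    from karbor.services.protection import protectable_plugin


    class WidgetProtectablePlugin(protectable_plugin.ProtectablePlugin):
        """Sketch: exposes fictional 'widgets' as protectable resources."""

        _SUPPORT_RESOURCE_TYPE = "OS::Example::Widget"

        def get_resource_type(self):
            return self._SUPPORT_RESOURCE_TYPE

        def get_parent_resource_types(self):
            # Widgets are owned by projects, so project is the parent type.
            return ("OS::Keystone::Project",)

        def list_resources(self, context, parameters=None):
            # A real plugin would query its backing service here.
            widgets = [("widget-1", "My widget")]  # placeholder data
            return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE,
                                      id=wid, name=name)
                    for wid, name in widgets]

        def show_resource(self, context, resource_id, parameters=None):
            return resource.Resource(type=self._SUPPORT_RESOURCE_TYPE,
                                     id=resource_id, name="My widget")

        def get_dependent_resources(self, context, parent_resource):
            # Called once per parent type: return the widgets belonging
            # to the given project.
            return self.list_resources(context)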
Add the configuration of the protectable plugin

#. Adding the Plugin class module to the entry_points
   Add the protectable plugin module name to the karbor.protectables
   namespace in the entry_points section of the setup.cfg file

   .. code-block:: ini

      [entry_points]
      karbor.protectables =
          project = karbor.services.protection.protectable_plugins.project:ProjectProtectablePlugin
          server = karbor.services.protection.protectable_plugins.server:ServerProtectablePlugin
          volume = karbor.services.protection.protectable_plugins.volume:VolumeProtectablePlugin
          image = karbor.services.protection.protectable_plugins.image:ImageProtectablePlugin
          share = karbor.services.protection.protectable_plugins.share:ShareProtectablePlugin
          network = karbor.services.protection.protectable_plugins.network:NetworkProtectablePlugin
          database = karbor.services.protection.protectable_plugins.database:DatabaseInstanceProtectablePlugin
diff --git a/doc/source/contributor/protection_plugins_guide.rst b/doc/source/contributor/protection_plugins_guide.rst deleted file mode 100644 index fabfd708..00000000 --- a/doc/source/contributor/protection_plugins_guide.rst +++ /dev/null @@ -1,274 +0,0 @@
.. raw:: html

.. role:: red
.. role:: green
.. role:: yellow
.. role:: indigo
.. role:: purple
.. role:: black

====================================
Protection Plugins Development Guide
====================================

.. contents:: :depth: 2

Introduction
============

Protection plugins are one of the core components of Karbor's protection
service. During protect, restore, and delete operations, Karbor activates the
protection plugins for the relevant resources in a certain order. Each
protection plugin can handle one or more protectables (resource types) and
specifies the actual implementation for them.

Overview
========

Plugins are responsible for the implementation of the following operations,
for each protectable (resource type) they cover:

#. Protect - creating a checkpoint from an existing resource
#. Restore - creating a resource from an existing checkpoint
#. Delete - deleting the resource's data from a checkpoint

Plugins can and should use the bank to store protection metadata (and
sometimes the data itself) as part of a protect operation, consume the
metadata from the bank during a restore operation, and delete the data from
the bank during a delete operation.


Each plugin must implement the following interface:

.. code-block:: python

    class ProtectionPlugin(object):
        def get_protect_operation(self, resource):
            pass

        def get_restore_operation(self, resource):
            pass

        def get_verify_operation(self, resource):
            pass

        def get_copy_operation(self, resource):
            pass

        def get_delete_operation(self, resource):
            pass

        @classmethod
        def get_supported_resources_types(cls):
            pass

        @classmethod
        def get_options_schema(cls, resource_type):
            pass

        @classmethod
        def get_saved_info_schema(cls, resource_type):
            pass

        @classmethod
        def get_restore_schema(cls, resource_type):
            pass

        @classmethod
        def get_saved_info(cls, metadata_store, resource):
            pass

        @classmethod
        def get_verify_schema(cls, resource_type):
            pass

#. **get_supported_resources_types**: this method should return a list of
   resource types this plugin handles. The plugin's methods will be called
   for each resource of these types. For example: `OS::Nova::Instance`,
   `OS::Cinder::Volume`.
#. **get_options_schema**: returns a schema of options and parameters for a
   protect operation.
#. **get_saved_info_schema**: returns a schema of data relevant to a
   protected resource in a checkpoint
#. **get_saved_info**: returns the actual data relevant to a protected
   resource in a checkpoint
#. **get_restore_schema**: returns a schema of parameters available for the
   restore operation.
#. **get_verify_schema**: returns a schema of parameters available for the
   verify operation.
#. **get_protect_operation**, **get_restore_operation**,
   **get_delete_operation**: each returns an Operation instance to be used
   for the protect, restore, and delete operations respectively. This
   instance may be created for each resource, or shared between multiple
   resources. The details of the Operation instance will be covered in the
   following sections.


Order of Execution
==================

Karbor's protection service orchestrates the execution of plugin operations
with respect to the resources each operation acts on. This matters because
the order can be critical for some resources in specific operations; on the
other hand, some operations can run concurrently to speed up the overall
operation. The `get_parent_resource_types` and `get_dependent_resources`
methods from the protectable plugins define the relation between two
resources.

Examples:

#. In a protect operation, a server plugin might want to quiesce the server
   before protecting its volumes in order to achieve a certain level of
   consistency for the protected volumes, and unquiesce the server again
   afterwards. However, once the server the volumes are attached to is
   quiesced, multiple volumes can be protected concurrently.
#. In a restore operation, we would like to restore the server's base image
   or volume prior to creating the server itself. However, multiple images
   and volumes can be restored concurrently, as there is no relation between
   them.

Three phases are defined for each operation:

#. **Preparation Phase**: This phase is for performing actions in relation to
   a resource's dependencies. It's called the "Preparation Phase" because it
   is where a plugin should do all the preparation required for the next
   phase. Operations in this phase should be as short as possible, since they
   are not parallelized as much as in the following phases. As an example,
   taking snapshots of all the volumes should happen in relation to the
   owning VMs and also happen in a narrow time frame; copying those snapshots
   can happen later and is much more parallelizable.
#. **Main Phase**: This phase is for doing work that has no dependencies or
   time sensitivity. This will be mainly used for transferring the large
   amount of information generated in the backup to different sites.
#. **Completion Phase**: This phase is for performing work once *all* the
   work, not just preparation, has completed on a resource and all of its
   dependencies. This is a good place to attach resources (in case of
   restore) or close transactions.

As a Protection Plugin developer you would like to minimize the work done in
the preparation and completion phases and do the bulk of the work in the main
phase, since this allows for the most efficient execution of the operation.

Implementing Plugin Operation
=============================

In order to specify the detailed flow of each operation, a *Protection
Plugin* needs to return an Operation instance implementing 'hooks'. These
hooks differ from one another by their time of execution in respect to other
hooks, either of the same resource or of other resources. The Operation
interface:
.. code-block:: python

    class Operation(object):
        def on_prepare_begin(self, checkpoint, resource, context, parameters,
                             **kwargs):
            pass

        def on_prepare_finish(self, checkpoint, resource, context, parameters,
                              **kwargs):
            pass

        def on_main(self, checkpoint, resource, context, parameters, **kwargs):
            pass

        def on_complete(self, checkpoint, resource, context, parameters,
                        **kwargs):
            pass


It's important to note that it is not necessary to implement every hook; it
is completely valid to only use the main or preparation phase. In fact, only
complex protection plugins are expected to need work in all of the phases.

For *each* operation the plugin can implement each of the hooks:

#. **Preparation hooks**: as noted, preparation is for running tasks in
   relation to other resources in the graph. This is why two hooks exist: one
   for running before dependent resources' preparation, and one for after.

   #. **Prepare begin hook**: invoked before any hook of this resource and
      its dependent resources has begun.

      For tasks that need to happen before any dependent resource's
      operations begin.

      Hook method name: **on_prepare_begin**

   #. **Prepare finish hook**: invoked after all prepare hooks of dependent
      resources are complete.

      For tasks that finish the work begun in the *prepare begin hook*, or
      that require the dependent resources' prepare phase to be finished.

      Hook method name: **on_prepare_finish**

#. **Main hook**: invoked after the resource's *prepare hooks* are complete.

   For tasks that do heavy lifting and can run in parallel to dependent or
   dependee resources' *main hooks*.

   Hook method name: **on_main**

#. **Complete hook**: invoked once the resource's main hook is complete, and
   the dependent resources' *complete hooks* are complete.

   For tasks that require the dependent resources' operations to be complete,
   and that finalize the operation on the resource.

   Hook method name: **on_complete**

For example: a Protection Plugin for Nova servers might implement a protect
operation by using the *prepare begin hook* to quiesce the server and/or
contact a guest agent to complete transactions. A Protection Plugin for
Cinder volumes can implement the *prepare finish hook* to take a snapshot of
the volume. The server's *prepare finish hook* unquiesces the server and/or
contacts a guest agent. Both the server's and the volume's *main hooks* do
the heavy lifting of copying the data.

Notes:

* Unimplemented methods are practically no-ops
* Each such method receives as parameters: ``checkpoint``, ``context``,
  ``resource``, and ``parameters`` objects

.. figure:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/protection-service/hooks.png
   :alt: Protection Plugin Hooks
   :align: center

   Protection Plugin Hooks

   :green:`Green`: Child resource Prepare_begin depends on its parent
   resource Prepare_begin

   :indigo:`Indigo`: The resource Prepare_finish depends on the resource
   Prepare_begin

   :purple:`Purple`: Parent resource Prepare_finish depends on the child
   resource Prepare_finish

   :yellow:`Yellow`: The resource Main depends on the resource Prepare_finish

   :red:`Red`: The resource Complete depends on the resource Main

   :black:`Black`: Parent resource Complete depends on the child resource's
   Complete
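To tie the hooks to the Nova/Cinder example above, here is a hedged sketch of
a protect Operation for a volume-like resource. The import path is assumed
from this guide, and ``take_snapshot``, ``copy_snapshot_to_bank``, and
``delete_snapshot`` are hypothetical placeholders rather than real Karbor or
OpenStack calls.

.. code-block:: python

    from karbor.services.protection.protection_plugin import Operation


    class ExampleVolumeProtectOperation(Operation):
        def on_prepare_begin(self, checkpoint, resource, context,
                             parameters, **kwargs):
            # Runs before dependent resources start preparing; nothing to
            # do for a simple volume.
            pass

        def on_prepare_finish(self, checkpoint, resource, context,
                              parameters, **kwargs):
            # Keep this short: snapshot while the owning server (a parent
            # resource) is still quiesced.
            self._snapshot_id = take_snapshot(resource.id)  # placeholder

        def on_main(self, checkpoint, resource, context, parameters,
                    **kwargs):
            # Heavy lifting; runs in parallel with other resources' main
            # hooks.
            copy_snapshot_to_bank(checkpoint, self._snapshot_id)  # placeholder

        def on_complete(self, checkpoint, resource, context, parameters,
                        **kwargs):
            # Everything, including dependencies, is done; clean up.
            delete_snapshot(self._snapshot_id)  # placeholder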
Existing Plugins
================
.. toctree::
   :maxdepth: 1

   ../protection_plugins
diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 601789e5..00000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,72 +0,0 @@
===================================
Karbor: Application Data Protection
===================================

Introduction
============
Karbor is an OpenStack project that provides a pluggable framework for
protecting and restoring the Data and Metadata that comprise an
OpenStack-deployed application - Application Data Protection as a Service.

Mission Statement
~~~~~~~~~~~~~~~~~
To protect the Data and Metadata that comprise an OpenStack-deployed
Application against loss/damage (e.g. backup, replication) by providing a
standard framework of APIs and services that allows vendors to provide
plugins through a unified interface

.. toctree::
   :maxdepth: 2


Using Karbor
============
.. toctree::
   :maxdepth: 2

   readme
   install/index
   configuration/index
   cli/index
   admin/index

Available Plugins
=================
.. toctree::
   :maxdepth: 2

   bank_plugins
   protectable_plugins
   protection_plugins

Contributor Docs
================

.. toctree::
   :maxdepth: 2

   contributor/index

Release Notes
=============

.. toctree::
   :maxdepth: 1

   releasenotes

Sample Files
============

.. toctree::
   :maxdepth: 1

   sample_policy

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
diff --git a/doc/source/install/common_configure.rst b/doc/source/install/common_configure.rst deleted file mode 100644 index 219ebfe1..00000000 --- a/doc/source/install/common_configure.rst +++ /dev/null @@ -1,73 +0,0 @@
#. Edit the ``/etc/karbor/karbor.conf`` file and complete the following
   actions:

   * In the ``[database]`` section, configure database access:

     .. code-block:: none

        [database]
        ...
        connection = mysql+pymysql://karbor:KARBOR_DBPASS@controller/karbor

     Replace ``KARBOR_DBPASS`` with the password you chose for the
     Data Protection database.

   * In the ``[DEFAULT]`` section,
     configure ``RabbitMQ`` message queue access:

     .. code-block:: none

        [DEFAULT]
        ...
        transport_url = rabbit://openstack:RABBIT_PASS@controller:5672

     Replace ``RABBIT_PASS`` with the password you chose for the
     ``openstack`` account in ``RabbitMQ``.

   * In the ``[keystone_authtoken]``, ``[trustee]``,
     ``[clients_keystone]``, and ``[karbor_client]`` sections,
     configure Identity service access:

     .. code-block:: none

        [keystone_authtoken]
        ...
        www_authenticate_uri = http://keystone1.example.com/identity
        auth_url = http://controller/identity_admin
        auth_type = password
        project_domain_name = default
        user_domain_name = default
        project_name = service
        username = karbor
        password = KARBOR_PASS

        [trustee]
        ...
        auth_type = password
        auth_url = http://controller/identity_admin
        username = karbor
        password = KARBOR_PASS
        user_domain_name = default

        [clients_keystone]
        ...
        auth_uri = http://controller/identity_admin

        [karbor_client]
        ...
        version = 1
        service_type = data-protect
        service_name = karbor

     Replace ``KARBOR_PASS`` with the password you chose for the
     ``karbor`` user in the Identity service.

#. Populate the Data Protection database:

   .. code-block:: console

      # su -s /bin/sh -c "karbor-manage db sync" karbor

   .. note::

      Ignore any deprecation messages in this output.
diff --git a/doc/source/install/common_prerequisites.rst b/doc/source/install/common_prerequisites.rst deleted file mode 100644 index 8b9046cd..00000000 --- a/doc/source/install/common_prerequisites.rst +++ /dev/null @@ -1,131 +0,0 @@
Prerequisites
-------------

Before you install and configure the Data Protection service, you must create
a database, service credentials, and API endpoints. The Data Protection
service also requires additional information in the Identity service.

#. To create the database, complete these steps:

   * Use the database access client to connect to the database
     server as the ``root`` user:

     .. code-block:: console

        $ mysql -u root -p

   * Create the ``karbor`` database:

     .. code-block:: console

        CREATE DATABASE karbor;

   * Grant proper access to the ``karbor`` database:

     .. code-block:: console

        GRANT ALL PRIVILEGES ON karbor.* TO 'karbor'@'localhost' IDENTIFIED BY 'KARBOR_DBPASS';
        GRANT ALL PRIVILEGES ON karbor.* TO 'karbor'@'%' IDENTIFIED BY 'KARBOR_DBPASS';

     Replace ``KARBOR_DBPASS`` with a suitable password.

   * Exit the database access client.

#. Source the ``admin`` credentials to gain access to
   admin-only CLI commands:

   .. code-block:: console

      $ . admin-openrc

#. To create the service credentials, complete these steps:

   * Create the ``karbor`` user:

     .. code-block:: console

        $ openstack user create --domain default --password-prompt karbor
        User Password:
        Repeat User Password:
        +-----------+----------------------------------+
        | Field     | Value                            |
        +-----------+----------------------------------+
        | domain_id | e0353a670a9e496da891347c589539e9 |
        | enabled   | True                             |
        | id        | ca2e175b851943349be29a328cc5e360 |
        | name      | karbor                           |
        +-----------+----------------------------------+

   * Add the ``admin`` role to the ``karbor`` user:

     .. code-block:: console

        $ openstack role add --project service --user karbor admin

     .. note::

        This command provides no output.

   * Create the ``karbor`` service entities:

     .. code-block:: console

        $ openstack service create --name karbor --description "Application Data Protection Service" data-protect
        +-------------+-------------------------------------+
        | Field       | Value                               |
        +-------------+-------------------------------------+
        | description | Application Data Protection Service |
        | enabled     | True                                |
        | id          | 727841c6f5df4773baa4e8a5ae7d72eb    |
        | name        | karbor                              |
        | type        | data-protect                        |
        +-------------+-------------------------------------+

#. Create the Data Protection service API endpoints:
   .. code-block:: console

      $ openstack endpoint create --region RegionOne data-protect public http://controller:8799/v1/%\(project_id\)s
      +--------------+------------------------------------------+
      | Field        | Value                                    |
      +--------------+------------------------------------------+
      | enabled      | True                                     |
      | id           | 3f4dab34624e4be7b000265f25049609         |
      | interface    | public                                   |
      | region       | RegionOne                                |
      | region_id    | RegionOne                                |
      | service_id   | 727841c6f5df4773baa4e8a5ae7d72eb         |
      | service_name | karbor                                   |
      | service_type | data-protect                             |
      | url          | http://controller:8799/v1/%(project_id)s |
      +--------------+------------------------------------------+

      $ openstack endpoint create --region RegionOne data-protect internal http://controller:8799/v1/%\(project_id\)s
      +--------------+------------------------------------------+
      | Field        | Value                                    |
      +--------------+------------------------------------------+
      | enabled      | True                                     |
      | id           | 3f4dab34624e4be7b000265f25049609         |
      | interface    | internal                                 |
      | region       | RegionOne                                |
      | region_id    | RegionOne                                |
      | service_id   | 727841c6f5df4773baa4e8a5ae7d72eb         |
      | service_name | karbor                                   |
      | service_type | data-protect                             |
      | url          | http://controller:8799/v1/%(project_id)s |
      +--------------+------------------------------------------+

      $ openstack endpoint create --region RegionOne data-protect admin http://controller:8799/v1/%\(project_id\)s
      +--------------+------------------------------------------+
      | Field        | Value                                    |
      +--------------+------------------------------------------+
      | enabled      | True                                     |
      | id           | 3f4dab34624e4be7b000265f25049609         |
      | interface    | admin                                    |
      | region       | RegionOne                                |
      | region_id    | RegionOne                                |
      | service_id   | 727841c6f5df4773baa4e8a5ae7d72eb         |
      | service_name | karbor                                   |
      | service_type | data-protect                             |
      | url          | http://controller:8799/v1/%(project_id)s |
      +--------------+------------------------------------------+
diff --git a/doc/source/install/get_started.rst b/doc/source/install/get_started.rst deleted file mode 100644 index 1b8a9442..00000000 --- a/doc/source/install/get_started.rst +++ /dev/null @@ -1,24 +0,0 @@
================================
Data Protection service overview
================================

Karbor's responsibility is to protect the Data and Meta-Data that comprises
an OpenStack-deployed Application against loss/damage (e.g. backup,
replication); it is not to be confused with Application Security or DLP. It
does that by providing a standard framework of APIs and services that enables
vendors to introduce various data protection services into a coherent and
unified flow for the user.

OpenStack Data Protection Orchestration includes the following components:

karbor-api
  Accepts API calls for provider, plan, checkpoint, scheduled operations,
  triggers, protectables, and restores.

karbor-protection
  Responsible for orchestrating basic operations (protect, restore, delete)
  over multiple resources.

karbor-operationengine
  Responsible for composing basic operations, scheduling operations, and
  tracking their progress.
diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst deleted file mode 100644 index c85e5ef3..00000000 --- a/doc/source/install/index.rst +++ /dev/null @@ -1,20 +0,0 @@
==================
Installation Guide
==================

.. toctree::
   :maxdepth: 1

   get_started
   install
   verify
   launch-instance
   next-steps

To protect the Data and Metadata that comprises an OpenStack-deployed
Application against loss/damage
(e.g. backup, replication), the Data Protection service (Karbor) provides a
standard framework of APIs and services that allows vendors to provide
plugins through a unified interface.

This chapter assumes a working setup of OpenStack following the
`OpenStack Installation Tutorial `_.
diff --git a/doc/source/install/install-source.rst b/doc/source/install/install-source.rst deleted file mode 100644 index 1849aa39..00000000 --- a/doc/source/install/install-source.rst +++ /dev/null @@ -1,90 +0,0 @@
.. _install-source:

Install from source
~~~~~~~~~~~~~~~~~~~


This section describes how to install and configure the Data Protection
service from source.

.. include:: common_prerequisites.rst

Install the services
--------------------

Retrieve and install karbor with the required packages::

    git clone https://git.openstack.org/openstack/karbor
    cd karbor
    sudo pip install -e .
    python setup.py install

This procedure installs the ``karbor`` python library and the following
executables:

* ``karbor-wsgi``: karbor wsgi script
* ``karbor-api``: karbor api script
* ``karbor-protection``: karbor protection script
* ``karbor-operationengine``: karbor operationengine script
* ``karbor-manage``: karbor manage script

Generate the sample configuration file karbor.conf.sample::

    # use tox
    tox -egenconfig
    # or run oslo-config-generator directly
    oslo-config-generator --config-file etc/oslo-config-generator/karbor.conf

Generate the sample policy file policy.yaml.sample::

    # use tox
    tox -egenpolicy
    # or run oslopolicy-sample-generator directly
    oslopolicy-sample-generator --config-file=etc/karbor-policy-generator.conf

Install the sample configuration files::

    mkdir /etc/karbor
    cp etc/api-paste.ini /etc/karbor
    cp etc/karbor.conf.sample /etc/karbor/karbor.conf
    cp etc/policy.yaml.sample /etc/karbor/policy.yaml
    cp -r etc/providers.d /etc/karbor

Create the log directory::

    mkdir /var/log/karbor

.. note::

   Karbor ships more preconfigured providers with different bank and
   protection plugins (such as EISOO, S3, File system, Cinder snapshot
   plugin, and more). If these are suitable for your environment, you can
   consult these provider configuration files for reference, or use them
   as-is by copying the configuration files from 'devstack/providers.d'
   to '/etc/karbor/providers.d'. By default, Karbor uses the 'OS Infra
   Provider with swift bank'.

Install the client
------------------

Retrieve and install the karbor client::

    git clone https://git.openstack.org/openstack/python-karborclient.git
    cd python-karborclient
    python setup.py install

Configure components
--------------------

#. Add a system user::

    groupadd karbor
    useradd karbor -g karbor -d /var/lib/karbor -s /sbin/nologin

.. include:: common_configure.rst

Finalize installation
---------------------

You can start the karbor services directly from the command line by executing
``karbor-api``, ``karbor-protection`` and ``karbor-operationengine``.
diff --git a/doc/source/install/install-ubuntu.rst b/doc/source/install/install-ubuntu.rst deleted file mode 100644 index e9bc8ccc..00000000 --- a/doc/source/install/install-ubuntu.rst +++ /dev/null @@ -1,31 +0,0 @@
.. _install-ubuntu:

Install and configure for Ubuntu
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the Data Protection
service for Ubuntu 14.04 (LTS) and Ubuntu 16.04 (LTS).
.. include:: common_prerequisites.rst

Install and configure components
--------------------------------

#. Install the packages:

   .. code-block:: console

      # pip install karbor

.. include:: common_configure.rst

Finalize installation
---------------------

1. Restart the Data Protection services:

   .. code-block:: console

      # service karbor-api restart
      # service karbor-operationengine restart
      # service karbor-protection restart
diff --git a/doc/source/install/install.rst b/doc/source/install/install.rst deleted file mode 100644 index d16d9c06..00000000 --- a/doc/source/install/install.rst +++ /dev/null @@ -1,30 +0,0 @@
.. _install:

Install and configure
~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure Karbor, the Application
Data Protection service, on the controller node.

Before you begin, make sure you already have a working OpenStack environment.

Required components:

* `Identity service (keystone) `_

Recommended components:

* `Compute service (nova) `_
* `Block Storage service (cinder) `_
* `Image service (glance) `_
* `Object Storage service (swift) `_
* `Shared Filesystems service (manila) `_

Note that installation and configuration vary by distribution.

.. toctree::
   :maxdepth: 1

   install-source
   install-ubuntu
   mod_wsgi
diff --git a/doc/source/install/launch-instance.rst b/doc/source/install/launch-instance.rst deleted file mode 100644 index 73080135..00000000 --- a/doc/source/install/launch-instance.rst +++ /dev/null @@ -1,248 +0,0 @@
.. _launch-instance:

Launch an instance
~~~~~~~~~~~~~~~~~~

In environments that include the Data Protection service, you can create a
checkpoint and restore from this checkpoint.

Create a checkpoint
-------------------

Create a checkpoint for some resource. For example, for a volume:

#. Source the ``demo`` credentials to perform
   the following steps as a non-administrative project:

   .. code-block:: console

      $ . demo-openrc

#. List providers:

   .. code-block:: console

      $ karbor provider-list
      +--------------------------------------+-------------------+-------------------------------------------------------------------------------------+
      | Id                                   | Name              | Description                                                                         |
      +--------------------------------------+-------------------+-------------------------------------------------------------------------------------+
      | b766f37c-d011-4026-8228-28730d734a3f | No-Op Provider    | This provider does nothing for each protect and restore operation. Used for testing |
      | cf56bd3e-97a7-4078-b6d5-f36246333fd9 | OS Infra Provider | This provider uses OpenStack's own services (swift, cinder) as storage              |
      | e4008868-be97-492c-be41-44e50ef2e16f | EISOO Provider    | This provider provides data protection for applications with EISOO AnyBackup       |
      +--------------------------------------+-------------------+-------------------------------------------------------------------------------------+

#. List protectable types:

   .. code-block:: console

      $ karbor protectable-list
      +-----------------------+
      | Protectable type      |
      +-----------------------+
      | OS::Cinder::Volume    |
      | OS::Glance::Image     |
      | OS::Keystone::Project |
      | OS::Nova::Server      |
      +-----------------------+

#. List volume resources and get the volume ID:
   .. code-block:: console

      $ openstack volume list
      +--------------------------------------+--------------+-----------+------+-------------+
      | ID                                   | Display Name | Status    | Size | Attached to |
      +--------------------------------------+--------------+-----------+------+-------------+
      | 286a43e9-3899-4983-965f-d8b1faef5e58 | Volume1      | available | 1    |             |
      +--------------------------------------+--------------+-----------+------+-------------+

#. Create a plan for this volume:

   .. code-block:: console

      $ karbor plan-create Plan1 cf56bd3e-97a7-4078-b6d5-f36246333fd9 '286a43e9-3899-4983-965f-d8b1faef5e58'='OS::Cinder::Volume'='Volume1'
      +-------------+----------------------------------------------------+
      | Property    | Value                                              |
      +-------------+----------------------------------------------------+
      | description | None                                               |
      | id          | 81ac01b7-0a69-4b0b-8ef5-bd46a900c90a               |
      | name        | Plan1                                              |
      | parameters  | {}                                                 |
      | provider_id | cf56bd3e-97a7-4078-b6d5-f36246333fd9               |
      | resources   | [                                                  |
      |             |   {                                                |
      |             |     "id": "286a43e9-3899-4983-965f-d8b1faef5e58",  |
      |             |     "name": "Volume1",                             |
      |             |     "type": "OS::Cinder::Volume"                   |
      |             |   }                                                |
      |             | ]                                                  |
      | status      | suspended                                          |
      +-------------+----------------------------------------------------+

#. Create a checkpoint from the plan:

   .. code-block:: console

      $ karbor checkpoint-create cf56bd3e-97a7-4078-b6d5-f36246333fd9 81ac01b7-0a69-4b0b-8ef5-bd46a900c90a
      +-----------------+------------------------------------------------------+
      | Property        | Value                                                |
      +-----------------+------------------------------------------------------+
      | created_at      | None                                                 |
      | extra_info      | None                                                 |
      | id              | c1112037-b19c-421a-83c9-dd209e785189                 |
      | project_id      | 690ccee85834425e973258252e0da888                     |
      | protection_plan | {                                                    |
      |                 |   "id": "81ac01b7-0a69-4b0b-8ef5-bd46a900c90a",      |
      |                 |   "name": "Plan1",                                   |
      |                 |   "resources": [                                     |
      |                 |     {                                                |
      |                 |       "id": "286a43e9-3899-4983-965f-d8b1faef5e58",  |
      |                 |       "name": "Volume1",                             |
      |                 |       "type": "OS::Cinder::Volume"                   |
      |                 |     }                                                |
      |                 |   ]                                                  |
      |                 | }                                                    |
      | resource_graph  | None                                                 |
      | status          | protecting                                           |
      +-----------------+------------------------------------------------------+

#. After a short time, verify successful creation of the checkpoint:

   .. code-block:: console

      $ karbor checkpoint-show cf56bd3e-97a7-4078-b6d5-f36246333fd9 c1112037-b19c-421a-83c9-dd209e785189
      +-----------------+-----------------------------------------------------------+
      | Property        | Value                                                     |
      +-----------------+-----------------------------------------------------------+
      | created_at      | 2017-03-27                                                |
      | extra_info      | None                                                      |
      | id              | c1112037-b19c-421a-83c9-dd209e785189                      |
      | project_id      | 690ccee85834425e973258252e0da888                          |
      | protection_plan | {                                                         |
      |                 |   "id": "81ac01b7-0a69-4b0b-8ef5-bd46a900c90a",           |
      |                 |   "name": "Plan1",                                        |
      |                 |   "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",  |
      |                 |   "resources": [                                          |
      |                 |     {                                                     |
      |                 |       "id": "286a43e9-3899-4983-965f-d8b1faef5e58",       |
      |                 |       "name": "Volume1",                                  |
      |                 |       "type": "OS::Cinder::Volume"                        |
      |                 |     }                                                     |
      |                 |   ]                                                       |
      |                 | }                                                         |
      | resource_graph  | [                                                         |
      |                 |   {                                                       |
      |                 |     "0x0": [                                              |
      |                 |       "OS::Cinder::Volume",                               |
      |                 |       "286a43e9-3899-4983-965f-d8b1faef5e58",             |
      |                 |       "Volume1"                                           |
      |                 |     ]                                                     |
      |                 |   },                                                      |
      |                 |   []                                                      |
      |                 | ]                                                         |
      | status          | available                                                 |
      +-----------------+-----------------------------------------------------------+

#. Create a restore from the checkpoint:
   .. code-block:: console

      $ karbor restore-create cf56bd3e-97a7-4078-b6d5-f36246333fd9 c1112037-b19c-421a-83c9-dd209e785189
      +------------------+--------------------------------------+
      | Property         | Value                                |
      +------------------+--------------------------------------+
      | checkpoint_id    | c1112037-b19c-421a-83c9-dd209e785189 |
      | id               | 2c9dea83-3e12-4fa1-80af-16f02b5738ef |
      | parameters       | {}                                   |
      | project_id       | 690ccee85834425e973258252e0da888     |
      | provider_id      | cf56bd3e-97a7-4078-b6d5-f36246333fd9 |
      | resources_reason | {}                                   |
      | resources_status | {}                                   |
      | restore_target   | None                                 |
      | status           | in_progress                          |
      +------------------+--------------------------------------+

#. After a short time, verify a successful restore from the checkpoint:

   .. code-block:: console

      $ karbor restore-show 2c9dea83-3e12-4fa1-80af-16f02b5738ef
      +------------------+----------------------------------------------------------------------------+
      | Property         | Value                                                                      |
      +------------------+----------------------------------------------------------------------------+
      | checkpoint_id    | c1112037-b19c-421a-83c9-dd209e785189                                       |
      | id               | 2c9dea83-3e12-4fa1-80af-16f02b5738ef                                       |
      | parameters       | {}                                                                         |
      | project_id       | 690ccee85834425e973258252e0da888                                           |
      | provider_id      | cf56bd3e-97a7-4078-b6d5-f36246333fd9                                       |
      | resources_reason | {}                                                                         |
      | resources_status | {u'OS::Cinder::Volume#b0b2d98d-ec8a-498e-ad50-00a2240c76a2': u'available'} |
      | restore_target   | None                                                                       |
      | status           | success                                                                    |
      +------------------+----------------------------------------------------------------------------+

#. Delete the checkpoint:

   .. code-block:: console

      $ karbor checkpoint-delete cf56bd3e-97a7-4078-b6d5-f36246333fd9 c1112037-b19c-421a-83c9-dd209e785189

#. Create a trigger:

   .. code-block:: console

      $ karbor trigger-create 'trigger-every-5-minutes' 'time' "pattern"="BEGIN:VEVENT\nRRULE:FREQ=MINUTELY;INTERVAL=5;\nEND:VEVENT","format"="calendar"
      +------------+------------------------------------------------------------------------------+
      | Property   | Value                                                                        |
      +------------+------------------------------------------------------------------------------+
      | id         | b065836f-6485-429d-b12c-e04395c5f58e                                         |
      | name       | My Trigger                                                                   |
      | properties | {                                                                            |
      |            |   "format": "calendar",                                                      |
      |            |   "pattern": "BEGIN:VEVENT\\nRRULE:FREQ=MINUTELY;INTERVAL=5;\\nEND:VEVENT",  |
      |            |   "start_time": "2017-03-02 22:56:42"                                       |
      |            | }                                                                            |
      | type       | time                                                                         |
      +------------+------------------------------------------------------------------------------+

#. Create a scheduled operation:

   .. code-block:: console

      $ karbor scheduledoperation-create 'Protect with My Trigger' protect b065836f-6485-429d-b12c-e04395c5f58e "plan_id"="81ac01b7-0a69-4b0b-8ef5-bd46a900c90a","provider_id"="cf56bd3e-97a7-4078-b6d5-f36246333fd9"
      +----------------------+---------------------------------------------------------+
      | Property             | Value                                                   |
      +----------------------+---------------------------------------------------------+
      | description          | None                                                    |
      | enabled              | True                                                    |
      | id                   | 2ebcf7cc-d8fe-4a70-af71-8a13f20556fb                    |
      | name                 | PMT                                                     |
      | operation_definition | {                                                       |
      |                      |   "plan_id": "ca572b42-6d35-4d81-bb4e-c9b100a3387a",    |
      |                      |   "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9" |
      |                      | }                                                       |
      | operation_type       | protect                                                 |
      | trigger_id           | b065836f-6485-429d-b12c-e04395c5f58e                    |
      +----------------------+---------------------------------------------------------+

#. After a short time, verify that the scheduled operation has taken effect:
   .. code-block:: console

      $ karbor checkpoint-list cf56bd3e-97a7-4078-b6d5-f36246333fd9 --plan_id 81ac01b7-0a69-4b0b-8ef5-bd46a900c90a
      +--------------------------------------+----------------------------------+-----------+-----------------------------------------------------------+------------+
      | Id                                   | Project id                       | Status    | Protection plan                                           | Created at |
      +--------------------------------------+----------------------------------+-----------+-----------------------------------------------------------+------------+
      | 92e74f0c-8519-4928-9bd5-0039e0fe92b0 | 690ccee85834425e973258252e0da888 | available | {                                                         | 2017-03-03 |
      |                                      |                                  |           |   "id": "ca572b42-6d35-4d81-bb4e-c9b100a3387a",           |            |
      |                                      |                                  |           |   "name": "Plan1",                                        |            |
      |                                      |                                  |           |   "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",  |            |
      |                                      |                                  |           |   "resources": [                                          |            |
      |                                      |                                  |           |     {                                                     |            |
      |                                      |                                  |           |       "id": "286a43e9-3899-4983-965f-d8b1faef5e58",       |            |
      |                                      |                                  |           |       "name": "Volume1",                                  |            |
      |                                      |                                  |           |       "type": "OS::Cinder::Volume"                        |            |
      |                                      |                                  |           |     }                                                     |            |
      |                                      |                                  |           |   ]                                                       |            |
      |                                      |                                  |           | }                                                         |            |
      +--------------------------------------+----------------------------------+-----------+-----------------------------------------------------------+------------+
diff --git a/doc/source/install/mod_wsgi.rst b/doc/source/install/mod_wsgi.rst deleted file mode 100644 index be08dc0d..00000000 --- a/doc/source/install/mod_wsgi.rst +++ /dev/null @@ -1,56 +0,0 @@
..
   Except where otherwise noted, this document is licensed under Creative
   Commons Attribution 3.0 License. You can view the license at:

   https://creativecommons.org/licenses/by/3.0/


Installing Karbor API with mod_wsgi
===================================

#. Install the Apache Service

   Fedora 21/RHEL7/CentOS7::

      sudo yum install httpd

   Fedora 22 (or higher)::

      sudo dnf install httpd

   Debian/Ubuntu::

      apt-get install apache2

#. Copy ``etc/apache2/apache-karbor-api.conf`` under the Apache sites
   directory

   Fedora/RHEL7/CentOS7::

      sudo cp etc/apache2/apache-karbor-api.conf /etc/httpd/conf.d/apache-karbor-api.conf

   Debian/Ubuntu::

      sudo cp etc/apache2/apache-karbor-api.conf /etc/apache2/sites-available/apache-karbor-api.conf

#. Edit ``apache-karbor-api.conf`` according to your installation
   and environment.

   * Modify the ``WSGIDaemonProcess`` directive to set the ``user`` and
     ``group`` values to an appropriate user on your server.
   * Modify the ``WSGIScriptAlias`` directive to point to the
     %KARBOR_BIN_DIR%/karbor-wsgi script.
   * Modify the ``Directory`` directive to set the path to the Karbor API
     code.
   * Modify the ``ErrorLog`` and ``CustomLog`` directives to redirect the
     logs to the right directory.

#. Enable the Apache karbor-api site and reload

   Fedora/RHEL7/CentOS7::

      sudo systemctl reload httpd

   Debian/Ubuntu::

      sudo a2ensite apache-karbor-api
      sudo apache2ctl -k restart
diff --git a/doc/source/install/next-steps.rst b/doc/source/install/next-steps.rst deleted file mode 100644 index b237e189..00000000 --- a/doc/source/install/next-steps.rst +++ /dev/null @@ -1,12 +0,0 @@
.. _next-steps:

Next steps
~~~~~~~~~~

Your OpenStack environment now includes the Karbor service.

To add more services, see the
`additional documentation on installing OpenStack `_ .

To learn more about the Karbor service, read the `Karbor developer
documentation `__.
diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst deleted file mode 100644 index 6a052236..00000000 --- a/doc/source/install/verify.rst +++ /dev/null @@ -1,43 +0,0 @@
.. _verify:

Verify operation
~~~~~~~~~~~~~~~~

Verify operation of the Data Protection service.

.. note::

   Perform these commands on the controller node.

#. Source the ``admin`` tenant credentials:

   .. code-block:: console

      $ . admin-openrc

#. List and show service components to verify successful launch and
   registration of each process:

   .. code-block:: console

      $ openstack service list | grep data-protect
      | dedab9a746e34d3990ca44bc2e885b49 | karbor | data-protect |

      $ openstack service show dedab9a746e34d3990ca44bc2e885b49
      +-------------+-------------------------------------+
      | Field       | Value                               |
      +-------------+-------------------------------------+
      | description | Application Data Protection Service |
      | enabled     | True                                |
      | id          | dedab9a746e34d3990ca44bc2e885b49    |
      | name        | karbor                              |
      | type        | data-protect                        |
      +-------------+-------------------------------------+

      $ karbor service-list
      +----+------------------------+------------+---------+-------+----------------------------+-----------------+
      | Id | Binary                 | Host       | Status  | State | Updated_at                 | Disabled Reason |
      +----+------------------------+------------+---------+-------+----------------------------+-----------------+
      | 2  | karbor-operationengine | controller | enabled | up    | 2019-11-03T12:42:28.000000 | -               |
      | 3  | karbor-protection      | controller | enabled | up    | 2019-11-03T12:42:28.000000 | -               |
      +----+------------------------+------------+---------+-------+----------------------------+-----------------+
diff --git a/doc/source/protectable_plugins.rst b/doc/source/protectable_plugins.rst deleted file mode 100644 index e1902c45..00000000 --- a/doc/source/protectable_plugins.rst +++ /dev/null @@ -1,66 +0,0 @@
Protectable Plugins
-------------------

Project
^^^^^^^

.. autoclass:: karbor.services.protection.protectable_plugins.project.ProjectProtectablePlugin()
   :noindex:
   :members:
   :show-inheritance:

Server
^^^^^^

.. autoclass:: karbor.services.protection.protectable_plugins.server.ServerProtectablePlugin()
   :noindex:
   :members:
   :show-inheritance:

Volume
^^^^^^

.. autoclass:: karbor.services.protection.protectable_plugins.volume.VolumeProtectablePlugin()
   :noindex:
   :members:
   :show-inheritance:

Image
^^^^^

.. autoclass:: karbor.services.protection.protectable_plugins.image.ImageProtectablePlugin()
   :noindex:
   :members:
   :show-inheritance:

Share
^^^^^

.. autoclass:: karbor.services.protection.protectable_plugins.share.ShareProtectablePlugin()
   :noindex:
   :members:
   :show-inheritance:

Network
^^^^^^^

.. autoclass:: karbor.services.protection.protectable_plugins.network.NetworkProtectablePlugin()
   :noindex:
   :members:
   :show-inheritance:

Database
^^^^^^^^

.. autoclass:: karbor.services.protection.protectable_plugins.database.DatabaseInstanceProtectablePlugin()
   :noindex:
   :members:
   :show-inheritance:

Pod
^^^

.. autoclass:: karbor.services.protection.protectable_plugins.pod.K8sPodProtectablePlugin()
   :noindex:
   :members:
   :show-inheritance:
diff --git a/doc/source/protection_plugins.rst b/doc/source/protection_plugins.rst deleted file mode 100644 index 24fba411..00000000 --- a/doc/source/protection_plugins.rst +++ /dev/null @@ -1,109 +0,0 @@
Protection Plugins
------------------

Project
^^^^^^^


Server
^^^^^^

Server to Bank
""""""""""""""
autoclass:: karbor.services.protection.protection_plugins.server.nova_protection_plugin.NovaProtectionPlugin()
-    :noindex:
-    :members:
-    :show-inheritance:
-
-Volume
-^^^^^^
-
-Cinder Backup
-"""""""""""""
-
-.. autoclass:: karbor.services.protection.protection_plugins.volume.cinder_protection_plugin.CinderBackupProtectionPlugin()
-    :noindex:
-    :members:
-    :show-inheritance:
-
-Snapshot
-""""""""
-
-.. autoclass:: karbor.services.protection.protection_plugins.volume.volume_snapshot_plugin.VolumeSnapshotProtectionPlugin()
-    :noindex:
-    :members:
-    :show-inheritance:
-
-Freezer
-"""""""
-
-.. autoclass:: karbor.services.protection.protection_plugins.volume.volume_freezer_plugin.FreezerProtectionPlugin()
-    :noindex:
-    :members:
-    :show-inheritance:
-
-Glance
-""""""
-
-.. autoclass:: karbor.services.protection.protection_plugins.volume.volume_glance_plugin.VolumeGlanceProtectionPlugin()
-    :noindex:
-    :members:
-    :show-inheritance:
-
-
-Image
-^^^^^
-
-Image to Bank
-"""""""""""""
-
-.. autoclass:: karbor.services.protection.protection_plugins.image.image_protection_plugin.GlanceProtectionPlugin()
-    :noindex:
-    :members:
-    :show-inheritance:
-
-Database
-^^^^^^^^
-
-Database Backup
-"""""""""""""""
-
-.. autoclass:: karbor.services.protection.protection_plugins.database.database_backup_plugin.DatabaseBackupProtectionPlugin()
-    :noindex:
-    :members:
-    :show-inheritance:
-
-Network
-^^^^^^^
-
-Network to Bank
-"""""""""""""""
-
-.. autoclass:: karbor.services.protection.protection_plugins.network.neutron_protection_plugin.NeutronProtectionPlugin()
-    :noindex:
-    :members:
-    :show-inheritance:
-
-Pod
-^^^
-
-Pod to Bank
-"""""""""""
-
-.. autoclass:: karbor.services.protection.protection_plugins.pod.pod_protection_plugin.PodProtectionPlugin()
-    :noindex:
-    :members:
-    :show-inheritance:
-
-Share
-^^^^^
-
-Share to Bank
-"""""""""""""
-
-.. autoclass:: karbor.services.protection.protection_plugins.share.share_snapshot_plugin.ManilaSnapshotProtectionPlugin()
-    :noindex:
-    :members:
-    :show-inheritance:
-
diff --git a/doc/source/readme.rst b/doc/source/readme.rst
deleted file mode 100644
index b5f943fd..00000000
--- a/doc/source/readme.rst
+++ /dev/null
@@ -1,137 +0,0 @@
-
-============
-Introduction
-============
-
-What is Karbor?
-===============
-
-Karbor is an OpenStack project that provides a pluggable framework for
-protecting and restoring Data and Metadata that comprises an OpenStack-deployed
-application - Application Data Protection as a Service.
-
-
-Mission Statement
------------------
-To protect the Data and Metadata that comprises an OpenStack-deployed
-Application against loss/damage (e.g. backup, replication) by providing a
-standard framework of APIs and services that allows vendors to provide plugins
-through a unified interface.
-
-
-Typical Use Case: 3-Tier Cloud App
-==================================
-
-3-Tier Cloud App Web/App/DB
-
-.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/3-tier-app.png
-    :alt: 3-Tier Cloud App
-    :width: 600
-    :align: center
-
-To provide full protection for this typical use case, we would have to
-protect many resources that have dependencies among them. The following
-diagram shows these dependencies in the form of a tree:
-.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/resource_tree_architecture.png
-    :alt: Resource Tree
-    :width: 600
-    :align: center
-
-These resources can be divided into groups, each of which will be handled by a
-different plugin in Karbor:
-
-- Volume
-- VM
-- Network
-- Project
-- Image
-
-Main Concepts
-=============
-
-Protection Providers
---------------------
-
-.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/protection_provider.png
-    :width: 600
-
-Protection providers are defined by the administrator for each tenant. They
-encapsulate every aspect of the protection procedure, namely, where to place
-the backup metadata and data, and how to do it. From the tenant's perspective,
-as long as it has access to a provider, it should be able to set up replication,
-back up data, and restore data.
-
-Since there could be many protection providers with varied features and options,
-each protection provider exposes what options it provides for each protectable.
-This allows the UI to dynamically adapt to each provider and show the user what
-options are available, what they mean, and what values are supported.
-
-This allows us to extend providers without updating Karbor, and allows
-provider implementations to easily add specialized options.
-
-Example
-~~~~~~~
-
-Let's take the OpenStack::Cinder::Volume resource *Protect* action.
-
-One of the action parameters in the Parameters Schema will be
-"Consistency Level"::
-
-    "parameters_schema" : {
-        "type": "object",
-        "properties": {
-            "consistency_level": {
-                "title": "Consistency Level",
-                "description": "The preferred consistency level",
-                "enum": ["Crash", "OS", "Application"]
-            }
-        }
-    }
-
-Protection Plans
-----------------
-
-Protection plans encapsulate all the information about the protection of the
-project. They define what you want to protect, what protection provider
-will be used for this plan, and what specialized options will be passed to the
-provider.
-
-There are two main aspects to a protection plan. The first is the continuous
-aspect. When a plan is started it becomes enabled, and continuous protection
-processes are started and monitored (e.g. replication). As long as the plan is
-active, Karbor will try to make sure the continuous aspects are active and valid.
-
-The other aspect is point-in-time protection or, as we call them in Karbor,
-checkpoints. Checkpoints are saved in the protection provider paired with the
-plan and, as stated, represent a restorable point in time for the plan. When a
-checkpoint is created, Karbor will store in the protection provider all the
-information required to successfully restore the project covered by the plan
-to how it was at that specific point in time.
-
-Automatic Operation
--------------------
-Automatic operations are processes that the user wants to perform without manual
-intervention. Up until now we described how to manually manage plans and
-checkpoints. The user can start and suspend plans, and create and delete backups
-manually whenever they want. This is perfect for small scale deployments, but
-most administrators will want to have these operations automated. As an example,
-they would like to set up checkpoints every day, or disable replication over
-the weekend when the system is not in use.
-
-Automatic operations are varied and their features vary by operation type.
-There are simple operations like "back up plan", which creates a single
-checkpoint at the user-requested time or event.
-And there are more complex
-automatic operations like the RetentionPlan, which allows the user to define a
-complex retention plan to automate the creation and deletion of checkpoints.
-
-Protectables
-------------
-Protectables are any class or type of entity that can be protected by Karbor.
-Since setups might have different entities they would like to protect, Karbor
-doesn't bind the API to specific entity types. The admin can even add new
-protectables during setup, as long as the protection provider can handle those
-entities. This flexibility means that Karbor is agnostic to the relationship
-between the resources being backed up.
-
diff --git a/doc/source/releasenotes.rst b/doc/source/releasenotes.rst
deleted file mode 100644
index 3a255a42..00000000
--- a/doc/source/releasenotes.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-==============
- Release Notes
-==============
-
-.. release-notes::
diff --git a/doc/source/sample_policy.rst b/doc/source/sample_policy.rst
deleted file mode 100644
index 6e35cbff..00000000
--- a/doc/source/sample_policy.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-====================
-Karbor Sample Policy
-====================
-
-The following is a sample Karbor policy file that has been auto-generated
-from default policy values in code. If you're using the default policies, then
-the maintenance of this file is not necessary, and it should not be copied into
-a deployment. Doing so will result in duplicate policy definitions. It is here
-to help explain which policy operations protect specific Karbor APIs, but it
-is not suggested to copy and paste it into a deployment unless you plan on
-providing a different policy for an operation that is not the default.
-
-The sample policy file can also be viewed in `file form <_static/karbor.policy.yaml.sample>`_.
-
-.. literalinclude:: _static/karbor.policy.yaml.sample
\ No newline at end of file
diff --git a/doc/source/specs/add-db-manage-purge.rst b/doc/source/specs/add-db-manage-purge.rst
deleted file mode 100644
index 57264ffa..00000000
--- a/doc/source/specs/add-db-manage-purge.rst
+++ /dev/null
@@ -1,153 +0,0 @@
-..
- This work is licensed under a Creative Commons Attribution 3.0 Unported
- License.
-
- http://creativecommons.org/licenses/by/3.0/legalcode
-
-=======================
-Karbor db purge utility
-=======================
-
-https://blueprints.launchpad.net/karbor/+spec/clean-deleted-data-in-db
-
-This spec adds the ability to sanely and safely purge deleted rows from
-the karbor database for all relevant tables. Presently, we keep all deleted
-rows. I believe this is unmaintainable as we move towards more upgradable
-releases. Today, operators depend on manual DB queries to delete this
-data, but this exposes the DB to human errors.
-
-The goal is to have this be an extension to the `karbor-manage db` command.
-Similar specs are being submitted to the various projects (Cinder, Glance)
-that touch a database.
-
-Problem description
-===================
-
-Very long-lived OpenStack installations will carry around database rows
-for years and years. This brings the following problems:
-
-* If deleted data is kept in the DB, the number of rows can grow very large,
-  taking up disk space on the nodes. Larger disk usage means more worry
-  for disaster recovery, long-running non-differential backups, etc.
-
-* A large number of deleted rows also means that an admin or authorized owner
-  querying for the corresponding rows will get 5xx responses timing out
-  on the DB, eventually slowing down other queries and API performance.
-
-* DB upgradeability is a big challenge if the older data styles are
-  inconsistent with the latest formats.
-
-To date, there is no "mechanism" to programmatically purge the deleted
-data.
-
-Proposed change
-===============
-
-The proposal is to add a "purge" method to DbCommands in
-karbor/karbor/cmd/manage.py. This will take a number-of-days argument,
-and use it in a ``date_sub`` match, like::
-
-    DELETE FROM plans
-    WHERE deleted != 0 AND deleted_at < date_sub(NOW()...)
-
-Alternatives
-------------
-
-Today, this can be accomplished manually with SQL commands, or via script.
-
-Data model impact
------------------
-
-None, all tables presently include a "deleted_at" column.
-
-REST API impact
----------------
-
-None; this would be run from karbor-manage.
-
-CLI impact
-----------
-
-A new karbor-manage command will be added::
-
-    karbor-manage db purge <age_in_days>
-
-Security impact
----------------
-
-None. This only touches already-deleted rows.
-
-Notifications impact
--------------------

-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
------------------

-This has the potential to improve performance for very large databases.
-Very long-lived installations can suffer from inefficient operations
-on large tables.
-This would have a negative DB performance impact while the purge is running.
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
----------------

-None
-
-Implementation
-==============
-
-Assignee(s)
-----------

-Primary assignee:
- chenying
-
-Work Items
----------

-Implement 'db purge' command.
-Add tests to confirm functionality.
-Add documentation of the feature.
-
-Dependencies
-============
-
-None
-
-Testing
-=======
-
-The test will be written as follows. Three rows will be inserted into a test DB.
-Two will have "deleted=1", one will have "deleted=0".
-One of the deleted rows will have "deleted_at" set to NOW(), the other will have
-"deleted_at" set to a few days ago, let's say 10. The test will call the new
-function with an argument of "7", to verify that only the row that was
-deleted 10 days ago will be purged. The two other rows should remain.
-
-Documentation Impact
-====================
-
-Documentation of this feature will be added to the admin guide and
-developer reference.
-
-References
-==========
-
-This is already discussed and accepted in other OpenStack components,
-such as Glance [1] and Cinder [2].
-
-[1] https://specs.openstack.org/openstack/glance-specs/specs/mitaka/database-purge.html
-[2] https://specs.openstack.org/openstack/cinder-specs/specs/kilo/database-purge.html
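A minimal sketch of what such a purge method might look like, assuming SQLAlchemy-reflected tables with the soft-delete `deleted` and `deleted_at` columns described above (function and table-handling details are illustrative, not Karbor's actual implementation):

```python
# Hypothetical sketch of a `db purge` implementation, assuming
# soft-delete tables with `deleted` and `deleted_at` columns.
# Names are illustrative, not Karbor's actual code.
import datetime

import sqlalchemy as sa


def purge_deleted_rows(engine, age_in_days):
    """Delete soft-deleted rows older than `age_in_days` days."""
    cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=age_in_days)
    metadata = sa.MetaData()
    metadata.reflect(bind=engine)
    with engine.begin() as conn:
        # Children first, so foreign keys are never left dangling.
        for table in reversed(metadata.sorted_tables):
            if 'deleted' not in table.columns:
                continue  # skip tables without soft-delete support
            result = conn.execute(
                table.delete().where(
                    sa.and_(table.c.deleted != 0,
                            table.c.deleted_at < cutoff)))
            print('Purged %d rows from %s' % (result.rowcount, table.name))
```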
diff --git a/doc/source/specs/adding-more-protection-parameters-retention-period.rst b/doc/source/specs/adding-more-protection-parameters-retention-period.rst
deleted file mode 100644
index 8754c4c3..00000000
--- a/doc/source/specs/adding-more-protection-parameters-retention-period.rst
+++ /dev/null
@@ -1,170 +0,0 @@
-..
- This work is licensed under a Creative Commons Attribution 3.0 Unported
- License.
-
- http://creativecommons.org/licenses/by/3.0/legalcode
-
-==================================================
-Adding more protection parameters: retention period
-==================================================
-
-https://blueprints.launchpad.net/karbor/+spec/adding-more-protection-parameters-retention-period
-
-Problem description
-===================
-
-Karbor supports scheduled operations according to the protection plan. As time
-goes on, checkpoints accumulate. For example, if we create a
-scheduled plan that protects a resource every day, there are 7 checkpoints after
-one week, and about 30 checkpoints after one month, etc. If we don't clean them
-up manually, old checkpoints take up valuable storage resources.
-
-In this specification we introduce the protection parameters retention
-period and max backup number. If the creation time of a checkpoint is older
-than the retention period, we need to delete it, and if the number of
-checkpoints is more than the max backup number, we need to delete the oldest one.
-
-Use Cases
-=========
-
-A user creates a scheduled operation; in addition to setting the scheduling
-period and plan, they can also set the retention period and/or the maximum
-number of checkpoints.
-
-Proposed change
-===============
-
-Adding more protection parameters: retention period
-----------------------------------------------------
-Add protection parameters to the scheduled operation: max_backups and
-retention_duration. These parameters are used to control the number
-of checkpoints, to prevent excessive checkpoints from consuming
-storage resources.
-
-1. Delete checkpoints in the auto-scheduled mode:
-Get max_backups and retention_duration from the scheduled operation.
-
-When checkpoint creation is complete, get all the available checkpoints in the
-plan and sort them by created_at in descending order; delete the older
-checkpoints that exceed max_backups, and delete the checkpoints whose
-created_at is older than the retention_duration (see the sketch after the
-notes below).
-
-
-Protection parameters: retention schema
----------------------------------------
-::
-
-    karbor scheduledoperation-create 'OS volumes retention protection' retention_protect 95e45924-49f4-4c12-b06f-5ec3c6245435
-    "plan_id"="49dd4b84-a8f9-4592-b7d4-be1e37175af6","provider_id"="cf56bd3e-97a7-4078-b6d5-f36246333fd9","max_backups"=3,"retention_duration"=10
-
-    +----------------------+-----------------------------------------------------------+
-    | Property             | Value                                                     |
-    +----------------------+-----------------------------------------------------------+
-    | description          | None                                                      |
-    | enabled              | True                                                      |
-    | id                   | 2c39406d-209b-4cba-88b4-2d9c0826eb39                      |
-    | name                 | OS volumes retention protection                           |
-    | operation_definition | {                                                         |
-    |                      |   "max_backups": "3",                                     |
-    |                      |   "plan_id": "49dd4b84-a8f9-4592-b7d4-be1e37175af6",      |
-    |                      |   "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",  |
-    |                      |   "retention_duration": "10"                              |
-    |                      | }                                                         |
-    | operation_type       | retention_protect                                         |
-    | trigger_id           | 95e45924-49f4-4c12-b06f-5ec3c6245435                      |
-    +----------------------+-----------------------------------------------------------+
-
-Note 1: max_backups:
-For example, "max_backups=3" indicates maximum retention of 3 backups.
-For example, "max_backups=10" indicates maximum retention of 10 backups.
-
-Note 2: retention_duration's unit is days.
-For example, "retention_duration=10" indicates maximum retention of 10 days.
-For example, "retention_duration=14" indicates maximum retention of 2 weeks.
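A minimal sketch of the pruning step described above, assuming checkpoints carry a `created_at` timestamp (the function and field names are illustrative, not the spec's actual implementation):

```python
# Hypothetical sketch of the pruning logic: sort available checkpoints
# newest-first, enforce max_backups, then enforce retention_duration.
import datetime
from typing import List


def prune_checkpoints(checkpoints: List[dict],
                      max_backups: int,
                      retention_days: int) -> List[dict]:
    """Return the checkpoints that should be deleted."""
    now = datetime.datetime.utcnow()
    ordered = sorted(checkpoints, key=lambda c: c['created_at'], reverse=True)
    # Everything beyond the newest `max_backups` entries is deleted.
    to_delete = ordered[max_backups:]
    cutoff = now - datetime.timedelta(days=retention_days)
    # Of the entries kept so far, anything older than the cutoff also goes.
    to_delete += [c for c in ordered[:max_backups]
                  if c['created_at'] < cutoff]
    return to_delete


# Example: keep at most 3 backups and nothing older than 10 days.
# doomed = prune_checkpoints(available, max_backups=3, retention_days=10)
```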
-
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
--------------------

-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
------------------

-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
----------------

-None
-
-
-Implementation
-==============
-
-Assignee(s)
-----------

-Primary assignee:
-gengchc2
-
-Work Items
----------

-* Delete checkpoints in the auto-scheduled mode: prune older checkpoints based
-  on max_backups and retention_duration in the protection plan.
-* Write tests
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-None
-
-
-References
-==========
-
-None
diff --git a/doc/source/specs/api-json-schema-validation.rst b/doc/source/specs/api-json-schema-validation.rst
deleted file mode 100644
index f7e4ca39..00000000
--- a/doc/source/specs/api-json-schema-validation.rst
+++ /dev/null
@@ -1,284 +0,0 @@
-..
- This work is licensed under a Creative Commons Attribution 3.0 Unported
- License.
-
- http://creativecommons.org/licenses/by/3.0/legalcode
-
-==============
-API Validation
-==============
-
-https://blueprints.launchpad.net/karbor/+spec/karbor-json-schema-validation
-
-Currently, Karbor has different implementations for validating
-request bodies. The purpose of this blueprint is to track the progress of
-validating the request bodies sent to the Karbor server, accepting requests
-that fit the resource schema and rejecting requests that do not fit the
-schema. Depending on the content of the request body, the request should
-be accepted or rejected consistently.
-
-
-Problem description
-===================
-
-Currently Karbor doesn't have a consistent request validation layer. Some
-resources validate input at the resource controller and some fail out in the
-backend. Ideally, Karbor would have some validation in place to catch
-disallowed parameters and return a validation error to the user.
-
-The end user will benefit from having consistent and helpful feedback,
-regardless of which resource they are interacting with.
-
-
-Use Cases
-=========
-
-As a user or developer, I want to observe consistent API validation of values
-passed to the Karbor API server.
-
-
-Proposed change
-===============
-
-One possible way to validate the Karbor API is to use jsonschema, similarly to
-Nova, Keystone and Glance (https://pypi.org/project/jsonschema).
-A jsonschema validator object can be used to check each resource against an
-appropriate schema for that resource. If the validation passes, the request
-can follow the existing flow of control through the resource manager to the
-backend. If the request body parameters fail the validation specified by the
-resource schema, a validation error wrapped in HTTPBadRequest will be returned
-from the server.
-
-Example:
-"Invalid input for field 'name'. The value is 'some invalid name value'."
-
-Each API definition should be added in the following way:
-
-* Create definition files under ./karbor/api/schemas/.
-* Each definition should be described with JSON Schema.
-* Each parameter of a definition (type, minLength, etc.) can be derived from
-  current validation code, the DB schema, unit tests, and so on.
-
-Some notes on doing this implementation:
-
-* Common parameter types can be leveraged across all Karbor resources. An
-  example of this would be as follows::
-
-    from karbor.api import validation
-    from karbor.api.validation import parameter_types
-    # plan create schema
-
-    create = {
-        'type': 'object',
-        'properties': {
-            'plan': {
-                'type': 'object',
-                'properties': {
-                    'name': parameter_types.name,
-                    'description': parameter_types.description,
-                    'provider_id': parameter_types.uuid,
-                    'parameters': parameter_types.metadata,
-                    'resources': parameter_types.metadata,
-                },
-                'required': ['provider_id', 'parameters'],
-                'additionalProperties': False,
-            },
-        },
-        'required': ['plan'],
-        'additionalProperties': False,
-    }
-
-    parameter_types.py:
-
-    name = {
-        'type': 'string', 'minLength': 0, 'maxLength': 255,
-    }
-
-    description = {
-        'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255,
-        'pattern': valid_description_regex,
-    }
-
-    uuid = {
-        'type': 'string', 'format': 'uuid'
-    }
-
-    # This registers a FormatChecker on the jsonschema module.
-    # It might appear that nothing is using the decorated method but it gets
-    # used in JSON schema validations to check uuid formatted strings.
-    import jsonschema
-    from oslo_utils import uuidutils
-
-    @jsonschema.FormatChecker.cls_checks('uuid')
-    def _validate_uuid_format(instance):
-        return uuidutils.is_uuid_like(instance)
-
-* The validation can take place at the controller layer using the decorator
-  below::
-
-    from karbor.api.schemas import plans as plan
-
-    @validation.schema(plan.create)
-    def create(self, req, body):
-        """Creates a new plan."""
-
-* When adding new API resources to Karbor, each new resource must be proposed
-  with its appropriate schema (a usage sketch of the validation flow follows
-  this list).
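A minimal sketch of the validation flow with plain jsonschema, using an abbreviated stand-in schema (Karbor's actual decorator wires this check into the WSGI controller layer; the schema below is illustrative, not Karbor's exact definition):

```python
# Minimal sketch of the validation flow with plain jsonschema.
# The schema is an abbreviated stand-in for the `plan.create`
# schema shown above, not Karbor's exact definition.
import jsonschema

create = {
    'type': 'object',
    'properties': {
        'plan': {
            'type': 'object',
            'properties': {
                'name': {'type': 'string', 'maxLength': 255},
                'provider_id': {'type': 'string'},
            },
            'required': ['provider_id'],
            'additionalProperties': False,
        },
    },
    'required': ['plan'],
    'additionalProperties': False,
}

body = {'plan': {'name': 'My 3 tier application'}}  # missing provider_id
try:
    jsonschema.validate(body, create)
except jsonschema.ValidationError as e:
    # Karbor would wrap this in an HTTPBadRequest response.
    print('400 Bad Request: %s' % e.message)
```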
-
-Alternatives
------------

-Before the API validation framework, we needed to add validation code to
-each API method in an ad-hoc manner. These changes made the API method code
-messy, and we needed to create multiple patches due to incomplete validation.
-
-If we use JSON Schema definitions instead, acceptable request formats are clear
-and we don't need to do this ad-hoc work in the future.
-
-
-Data model impact
------------------
-
-None
-
-
-REST API impact
----------------
-
-API response code changes:
-
-There are some occurrences where the API response code will change when adding
-a schema layer. For example, on the current master the 'services' table has
-'host' and 'binary' columns of at most 255 characters. When updating a service,
-a user can pass 'host' and 'binary' values of more than 255 characters, which
-obviously fails with 404 ServiceNotFound, wasting a database call. Instead, we
-can restrict 'host' and 'binary' to a maximum of 255 characters in the schema
-definition of 'services'. If a user passes more than 255 characters, they will
-get a 400 BadRequest in response.
-
-API response error messages:
-
-There will be a change in the error messages returned to the user. For example,
-on the current master, if a user passes more than 255 characters for the volume
-name, the error message below is returned to the user from karbor-api:
-
-Invalid input received: name has
-characters, more than 255.
-
-With schema validation, the error message below will be returned to the user in
-this case:
-
-Invalid input for field/attribute name. Value: .
-'' is too long.
-
-
-Security impact
----------------
-
-The output from the request validation layer should not compromise data or
-expose private data to an external user. Request validation should not
-return information upon successful validation.
-In the event a request
-body is not valid, the validation layer should return the invalid values
-and/or the values required by the request, which the end user should know.
-The parameters of the resources being validated are public information,
-described in the Karbor API spec, with the exception of private data.
-In the event the user's private data fails validation, a check can be built
-into the error handling of the validator not to return the actual value of the
-private data.
-
-The jsonschema documentation notes security considerations for both schemas and
-instances:
-http://json-schema.org/latest/json-schema-core.html#anchor21
-
-Better up-front input validation will reduce the ability of malicious user
-input to exploit security bugs.
-
-
-Notifications impact
--------------------

-None
-
-Other end user impact
---------------------

-None
-
-Performance Impact
------------------

-Karbor will incur some performance cost for this comprehensive request
-parameter validation, because checks will be added for API parameters
-that are not validated today.
-
-
-Other deployer impact
---------------------

-None
-
-
-Developer impact
----------------

-This will require developers contributing new extensions to Karbor to have
-a proper schema representing the extension's API.
-
-
-Implementation
-==============
-
-Assignee(s)
-----------

-Primary assignee:
-chenying :
-
-Work Items
----------

-1. Initial validator implementation, which will contain common validator code
-   designed to be shared across all resource controllers validating request
-   bodies.
-2. Introduce validation schemas for existing API resources.
-3. Enforce validation on proposed API additions and extensions.
-4. Remove duplicated ad-hoc validation code.
-5. Add unit and end-to-end tests of related APIs.
-6. Add/Update Karbor documentation.
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Some tests can be added as each resource is validated against its schema.
-These tests should walk through invalid request types.
-
-Documentation Impact
-====================
-
-1. The Karbor API documentation will need to be updated to reflect the
-   REST API changes.
-2. The Karbor developer documentation will need to be updated to explain
-   how the schema validation will work and how to add JSON schemas for
-   new APIs.
-
-
-References
-==========
-
-Useful Links:
-
-* [Understanding JSON Schema] (http://spacetelescope.github.io/understanding-json-schema/reference/object.html)
-
-* [Nova Validation Examples] (https://opendev.org/openstack/nova/src/branch/master/nova/api/validation)
-
-* [JSON Schema on PyPI] (https://pypi.org/project/jsonschema)
-
-* [JSON Schema core definitions and terminology] (http://tools.ietf.org/html/draft-zyp-json-schema-04)
-
-* [JSON Schema Documentation] (http://json-schema.org/documentation.html)
diff --git a/doc/source/specs/api-service.rst b/doc/source/specs/api-service.rst
deleted file mode 100644
index 4981e0ff..00000000
--- a/doc/source/specs/api-service.rst
+++ /dev/null
@@ -1,258 +0,0 @@
-..
- This work is licensed under a Creative Commons Attribution 3.0 Unported
- License.
-
- http://creativecommons.org/licenses/by/3.0/legalcode
-
-===========
-API Service
-===========
-
-https://review.opendev.org/#/c/266338/
-
-The APIs expose Application Data Protection services to the Karbor user.
-
-The purpose of the services is to maximize flexibility and accommodate
-(hopefully) any kind of protection for any type of resource, whether
-it is a basic OpenStack resource (such as a VM, Volume, Image, etc.) or
-some ancillary resource within an application system that is not managed
-in OpenStack (such as a hardware device, an external database, etc.).
-
-
-
-=========================
-WSGI Resources Controller
-=========================
-
-The WSGI Controller handles incoming web requests that are dispatched
-from the WSGI application APIRouter.
-
-.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/api-service-class-diagram.png
-
-From the module class graph, the API service basically has the following
-resource controllers:
-
-Provider Controller
--------------------
-Enables the Karbor user to list available providers and to get the parameters
-and result schema super-set for all plugins of a specific provider.
-
-
-Checkpoint Controller
---------------------

-Enables the Karbor user to access and manage the checkpoints stored
-in the protection provider.
-
-
-Protectable Controller
----------------------

-Enables the Karbor user to access information about which resource types
-are protectable (i.e. can be protected by Karbor).
-In addition, it enables the user to get additional information on each
-resource type, such as a list of actual instances and their dependencies.
-
-Plan Controller
---------------

-This API enables the Karbor user to access the protection Plan registry
-and do the following operations:
-
-- Plan CRUD.
-- List Plans.
-- Starting and suspending of plans.
-
-
-Scheduled Operation Controller
------------------------------

-This API enables the Karbor user to manage Scheduled Operations:
-
-- Operation CRUD.
-- List Operations.
-
-Trigger Controller
------------------

-This API enables the Karbor user to manage Triggers.
-A trigger can only be deleted when it isn't used in any
-scheduled operation.
-
-- Trigger CRUD.
-- List Triggers.
-
-
-Restore Controller
------------------

-This API enables the Karbor user to restore a checkpoint onto a restore target:
-
-- Create restored system from a checkpoint.
-
-
-============================
-API Service database tables
-============================
-
-The time_triggers and scheduled_operations database tables are the same as
-the tables in the operation engine design.
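As a rough illustration of the table definitions that follow, the `plans` table might map to a SQLAlchemy model along these lines (a sketch inferred from the column listing below, not Karbor's actual models module):

```python
# Rough sketch of a SQLAlchemy model matching the `plans` table
# described below. Inferred from the column listing, not copied
# from Karbor's actual code.
from sqlalchemy import Boolean, Column, DateTime, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class Plan(Base):
    __tablename__ = 'plans'

    id = Column(String(36), primary_key=True)
    name = Column(String(255), nullable=False)
    provider_id = Column(String(36), nullable=False)
    project_id = Column(String(255), nullable=False)
    status = Column(String(64), nullable=False)
    # Soft-delete bookkeeping columns shared by all tables.
    created_at = Column(DateTime)
    updated_at = Column(DateTime)
    deleted_at = Column(DateTime)
    deleted = Column(Boolean, nullable=False, default=False)
```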
-1. plans
-
-+-------------------------+--------------+------+-----+---------+-------+
-| Field                   | Type         | Null | Key | Default | Extra |
-+=========================+==============+======+=====+=========+=======+
-| id                      | varchar(36)  | NO   | PRI | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| name                    | varchar(255) | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| provider_id             | varchar(36)  | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| project_id              | varchar(255) | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| status                  | varchar(64)  | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| created_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| updated_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| deleted_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| deleted                 | Boolean      | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-
-2. resources
-
-+-------------------------+--------------+------+-----+---------+-------+
-| Field                   | Type         | Null | Key | Default | Extra |
-+=========================+==============+======+=====+=========+=======+
-| id                      | Integer      | NO   | PRI | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| plan_id                 | varchar(255) | NO   | FOR | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| resource_id             | varchar(36)  | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| resource_type           | varchar(64)  | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| created_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| updated_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| deleted_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| deleted                 | Boolean      | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-
-3.
restores - -+-----------------+--------------+------+-----+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+=================+==============+======+=====+=========+=======+ -| id | varchar(36) | NO | PRI | NULL | | -+-----------------+--------------+------+-----+---------+-------+ -| project_id | varchar(255) | NO | | NULL | | -+-----------------+--------------+------+-----+---------+-------+ -| provider_id | varchar(36) | NO | | NULL | | -+-----------------+--------------+------+-----+---------+-------+ -| checkpoint_id | varchar(36) | NO | | NULL | | -+-----------------+--------------+------+-----+---------+-------+ -| restore_target | varchar(255) | NO | | NULL | | -+-----------------+--------------+------+-----+---------+-------+ -| parameters | varchar(255) | NO | | NULL | | -+-----------------+--------------+------+-----+---------+-------+ -| status | varchar(64) | NO | | NULL | | -+-----------------+--------------+------+-----+---------+-------+ -| created_at | Datetime | YES | | NULL | | -+-----------------+--------------+------+-----+---------+-------+ -| updated_at | Datetime | YES | | NULL | | -+-----------------+--------------+------+-----+---------+-------+ -| deleted_at | Datetime | YES | | NULL | | -+-----------------+--------------+------+-----+---------+-------+ -| deleted | Boolean | NO | | NULL | | -+-----------------+--------------+------+-----+---------+-------+ - -4. triggers - -+--------------------+--------------+------+-----+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+====================+==============+======+=====+=========+=======+ -| id | varchar(36) | NO | PRI | NULL | | -+--------------------+--------------+------+-----+---------+-------+ -| name | varchar(255) | NO | | NULL | | -+--------------------+--------------+------+-----+---------+-------+ -| project_id | varchar(255) | NO | | NULL | | -+--------------------+--------------+------+-----+---------+-------+ -| type | varchar(64) | NO | | NULL | | -+--------------------+--------------+------+-----+---------+-------+ -| properties | TEXT | NO | | NULL | | -+--------------------+--------------+------+-----+---------+-------+ -| created_at | Datetime | YES | | NULL | | -+--------------------+--------------+------+-----+---------+-------+ -| updated_at | Datetime | YES | | NULL | | -+--------------------+--------------+------+-----+---------+-------+ -| deleted_at | Datetime | YES | | NULL | | -+--------------------+--------------+------+-----+---------+-------+ -| deleted | Boolean | NO | | NULL | | -+--------------------+--------------+------+-----+---------+-------+ - -5. 
scheduled_operations
-
-+----------------------+--------------+------+-----+---------+-------+
-| Field                | Type         | Null | Key | Default | Extra |
-+======================+==============+======+=====+=========+=======+
-| id                   | varchar(36)  | NO   | PRI | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| name                 | varchar(255) | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| operation_type       | varchar(64)  | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| project_id           | varchar(255) | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| trigger_id           | varchar(36)  | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| operation_definition | TEXT         | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| created_at           | Datetime     | YES  |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| updated_at           | Datetime     | YES  |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| deleted_at           | Datetime     | YES  |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| deleted              | Boolean      | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-
-6. services
-
-+----------------------+--------------+------+-----+---------+-------+
-| Field                | Type         | Null | Key | Default | Extra |
-+======================+==============+======+=====+=========+=======+
-| id                   | Integer      | NO   | PRI | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| host                 | varchar(255) | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| binary               | varchar(255) | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| topic                | varchar(255) | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| report_count         | Integer      | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| disabled             | Boolean      | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| disabled_reason      | varchar(255) | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| modified_at          | Datetime     | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| rpc_current_version  | varchar(36)  | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| rpc_available_version| varchar(36)  | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| created_at           | Datetime     | YES  |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| updated_at           | Datetime     | YES  |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| deleted_at           | Datetime     | YES  |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
-| deleted              | Boolean      | NO   |     | NULL    |       |
-+----------------------+--------------+------+-----+---------+-------+
diff --git a/doc/source/specs/api/api_examples.md b/doc/source/specs/api/api_examples.md
deleted file mode 100644
index 33fad0d1..00000000
--- a/doc/source/specs/api/api_examples.md
+++ /dev/null
@@ -1,1079 +0,0 @@
-# Karbor API #
-
-----------
-
-## Protection Provider ##
-
-### List Protection Providers ###
-
-> **get** :
/v1/{project_id}/providers - -#### Response JSON #### -```json -{ - "providers": [ - { - "id": "2220f8b1-975d-4621-a872-fa9afb43cb6c", - "name": "OS Infra Provider", - "description": "This provider uses OpenStack's own services (swift, cinder) as storage", - "extended_info_schema": { - "options_schema": { - "OS::Cinder::Volume": { - "required": [ - "backup_mode" - ], - "type": "object", - "properties": { - "backup_mode": { - "default": "auto", - "enum": [ - "full", - "incremental", - "auto" - ], - "type": "string", - "description": "The backup mode.", - "title": "Backup Mode" - } - }, - "title": "Cinder Protection Options" - } - }, - "saved_info_schema": { - "OS::Cinder::Volume": { - "required": [ - "name" - ], - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name for this backup.", - "title": "Name" - } - }, - "title": "Cinder Protection Saved Info" - } - }, - "restore_schema": { - "OS::Cinder::Volume": { - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "description": "The name of the restored volume.", - "title": "Restore Name" - } - }, - "title": "Cinder Protection Restore" - } - } - } - } - ], - "providers_links": [ - { - "href": "/v1/{project_id}/providers?limit={limit_num}&marker=cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "rel": "next" - } - ] -} -``` - -### Show Protection Provider ### -> **get** : /v1/{project_id}/providers/{provider_id} -#### Response JSON #### -```json -{ - "provider": { - "id": "2220f8b1-975d-4621-a872-fa9afb43cb6c", - "name": "OS Infra Provider", - "description": "This provider uses OpenStack's own services (swift, cinder) as storage", - "extended_info_schema": { - "options_schema": { - "OS::Cinder::Volume": { - "required": [ - "backup_mode" - ], - "type": "object", - "properties": { - "backup_mode": { - "default": "auto", - "enum": [ - "full", - "incremental", - "auto" - ], - "type": "string", - "description": "The backup mode.", - "title": "Backup Mode" - } - }, - "title": "Cinder Protection Options" - } - }, - "saved_info_schema": { - "OS::Cinder::Volume": { - "required": [ - "name" - ], - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name for this backup.", - "title": "Name" - } - }, - "title": "Cinder Protection Saved Info" - } - }, - "restore_schema": { - "OS::Cinder::Volume": { - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "description": "The name of the restored volume.", - "title": "Restore Name" - } - }, - "title": "Cinder Protection Restore" - } - } - } - } -} -``` - ----------- - -## Checkpoint ## - -### List Checkpoints ### -> **get** : /v1/{project_id}/providers/{provider_id}/checkpoints -#### Response JSON #### -```json -{ - "checkpoints": [ - { - "id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "project_id": "e486a2f49695423ca9c47e589b948108", - "status": "available", - "protection_plan": { - "id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb", - "name": "My 3 tier application", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume" - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume" - } - 
-                ]
-            },
-            "resource_graph": "[{'0x3': ['OS::Cinder::Volume', '33b6bb0b-1157-4e66-8553-1c9e14b1c7ba', 'Data volume'], '0x2': ['OS::Cinder::Volume', '25336116-f38e-4c22-81ad-e9b7bd71ba51', 'System volume'], '0x1': ['OS::Nova::Server', 'cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01', 'App server'], '0x0': ['OS::Glance::Image', '99777fdd-8a5b-45ab-ba2c-52420008103f', 'cirros-0.3.4-x86_64-uec']}, [['0x1', ['0x0']]]]"
-        }
-    ],
-    "checkpoints_links": [
-        {
-            "href": "/v1/{project_id}/checkpoints?limit={limit_num}&marker=dcb20606-ad71-40a3-80e4-ef0fafdad0c3",
-            "rel": "next"
-        }
-    ]
-}
-```
-
-### Create Checkpoint ###
-> **post** : /v1/{project_id}/providers/{provider_id}/checkpoints
-#### Request JSON ####
-```json
-{
-    "checkpoint": {
-        "plan_id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb",
-        "extra_info": {
-            "create-by": "operation-engine",
-            "trigger_id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba"
-        }
-    }
-}
-```
-#### Response JSON ####
-```json
-{
-    "checkpoint": {
-        "id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3",
-        "project_id": "e486a2f49695423ca9c47e589b948108",
-        "status": "available",
-        "protection_plan": {
-            "id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb",
-            "name": "My 3 tier application",
-            "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",
-            "resources": [
-                {
-                    "id": "99777fdd-8a5b-45ab-ba2c-52420008103f",
-                    "type": "OS::Glance::Image",
-                    "name": "cirros-0.3.4-x86_64-uec"
-                },
-                {
-                    "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01",
-                    "type": "OS::Nova::Server",
-                    "name": "App server"
-                },
-                {
-                    "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51",
-                    "type": "OS::Cinder::Volume",
-                    "name": "System volume"
-                },
-                {
-                    "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba",
-                    "type": "OS::Cinder::Volume",
-                    "name": "Data volume"
-                }
-            ]
-        },
-        "resource_graph": "[{'0x3': ['OS::Cinder::Volume', '33b6bb0b-1157-4e66-8553-1c9e14b1c7ba', 'Data volume'], '0x2': ['OS::Cinder::Volume', '25336116-f38e-4c22-81ad-e9b7bd71ba51', 'System volume'], '0x1': ['OS::Nova::Server', 'cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01', 'App server'], '0x0': ['OS::Glance::Image', '99777fdd-8a5b-45ab-ba2c-52420008103f', 'cirros-0.3.4-x86_64-uec']}, [['0x1', ['0x0']]]]"
-    }
-}
-```
-
-### Show Checkpoint ###
-> **get** : /v1/{project_id}/providers/{provider_id}/checkpoints/{checkpoint_id}
-#### Response JSON ####
-```json
-{
-    "checkpoint": {
-        "id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3",
-        "project_id": "e486a2f49695423ca9c47e589b948108",
-        "status": "available",
-        "protection_plan": {
-            "id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb",
-            "name": "My 3 tier application",
-            "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",
-            "resources": [
-                {
-                    "id": "99777fdd-8a5b-45ab-ba2c-52420008103f",
-                    "type": "OS::Glance::Image",
-                    "name": "cirros-0.3.4-x86_64-uec"
-                },
-                {
-                    "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01",
-                    "type": "OS::Nova::Server",
-                    "name": "App server"
-                },
-                {
-                    "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51",
-                    "type": "OS::Cinder::Volume",
-                    "name": "System volume"
-                },
-                {
-                    "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba",
-                    "type": "OS::Cinder::Volume",
-                    "name": "Data volume"
-                }
-            ]
-        },
-        "resource_graph": "[{'0x3': ['OS::Cinder::Volume', '33b6bb0b-1157-4e66-8553-1c9e14b1c7ba', 'Data volume'], '0x2': ['OS::Cinder::Volume', '25336116-f38e-4c22-81ad-e9b7bd71ba51', 'System volume'], '0x1': ['OS::Nova::Server', 'cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01', 'App server'], '0x0': ['OS::Glance::Image', '99777fdd-8a5b-45ab-ba2c-52420008103f', 'cirros-0.3.4-x86_64-uec']}, [['0x1', ['0x0']]]]"
-    }
-}
-```
-
-### Delete Checkpoint ###
-> **delete** :
/v1/{project_id}/providers/{provider_id}/checkpoints/{checkpoint_id} -#### Response JSON #### -```json -{} -``` - ----------- - -## Plan ## - -### List Plans ### -> **get** : /v1/{project_id}/plans -#### Response JSON #### -```json -{ - "plans": [ - { - "id": "9e5475d2-6425-4986-9136-a4f09642297f", - "name": "My 3 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "status": "suspended", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } - ], - "plans_links": [ - { - "href": "/v1/{project_id}/plans?limit={limit_num}&marker=9e5475d2-6425-4986-9136-a4f09642297f", - "rel": "next" - } - ] -} -``` - -### Create Plan ### -> **post** : /v1/{project_id}/plans -#### Request JSON #### -```json -{ - "plan": { - "name": "My 3 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } -} -``` - -#### Response JSON #### -```json -{ - "plan": { - "id": "9e5475d2-6425-4986-9136-a4f09642297f", - "name": "My 3 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "status": "suspended", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - 
"OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } -} -``` - -### Show Plan ### -> **get** : /v1/{project_id}/plans/{plan_id} -#### Response JSON #### -```json -{ - "plan": { - "id": "9e5475d2-6425-4986-9136-a4f09642297f", - "name": "My 3 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "status": "suspended", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } -} -``` - -### Update Plan ### -> **put** : /v1/{project_id}/plans/{plan_id} -#### Request JSON #### -```json -{ - "plan":{ - "status": "started", - "name": "My 1 tier application" - } -} -``` - -#### Response JSON #### -```json -{ - "plan": { - "id": "9e5475d2-6425-4986-9136-a4f09642297f", - "name": "My 1 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "status": "started", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } -} -``` - -### Delete Plan ### -> **delete** : /v1/{project_id}/plans/{plan_id} -#### Response JSON #### -```json -None -``` - ----------- - -## Protectable ## - -### List Protectable Types ### -> **get** : /v1/{project_id}/protectables -#### Response JSON #### -```json -{ - "protectable_type": [ - "OS::Keystone::Project", - "OS::Cinder::Volume", - "OS::Cinder::ConsistencyGroup", - "OS::Glance::Image", - "OS::Nova::Server" - ] -} -``` - -### Show Protectable Type ### -> **get** : /v1/{project_id}/protectables/{protectable_type} -#### Response JSON #### -```json -{ - "protectable_type": { - "name": "OS::Nova::Server", - "dependent_types": [ - "OS::Cinder::Volume", - "OS::Glance::Image" - ] - } -} -``` - -### List Protectable Instances ### -> **get** : /v1/{project_id}/protectables/{protectable_type}/instances -#### Response JSON #### -```json -{ - "instances": [ - { - "id": 
"25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ] - "instances_links": [ - { - "href": "/v1/{project_id}/instances?limit=1&marker=cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "rel": "next" - } - ] -} -``` - -### Show Protectable Instance ### -> **get** : /v1/{project_id}/protectables/{protectable_type}/instances/{resource_id} -#### Response JSON #### -```json -{ - "instance": { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "My VM", - "dependent_resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - } - ] - } -} -``` - ----------- - -## Scheduled Operation ## - -### List Scheduled Operations ### -> **get** : /v1/{project_id}/scheduled_operations -#### Response JSON #### -```json -{"operations": [ - {"scheduled_operation": { - "id": "1a2c0c3d-f402-4cd8-b5db-82e85cb51fad", - "name": "My scheduled operation", - "description": "It will run everyday", - "operation_type": "protect", - "trigger_id": "23902b02-5666-4ee6-8dfe-962ac09c3995", - "operation_definition": { - "provider_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa399", - "plan_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398" - }, - "enabled": 1 - } - }, - ], - "operations_links": "" -} -``` - -### Create Scheduled Operation ### -> **post** : /v1/{project_id}/scheduled_operations -#### Request JSON #### -```json -{"scheduled_operation": { - "name": "My scheduled operation", - "description": "It will run everyday", - "operation_type": "protect", - "trigger_id": "23902b02-5666-4ee6-8dfe-962ac09c3995", - "operation_definition": { - "provider_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa399", - "plan_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398" - } - } -} -``` - -#### Response JSON #### -```json -{"scheduled_operation": { - "id": "1a2c0c3d-f402-4cd8-b5db-82e85cb51fad", - "name": "My scheduled operation", - "description": "It will run everyday", - "operation_type": "protect", - "trigger_id": "23902b02-5666-4ee6-8dfe-962ac09c3995", - "operation_definition": { - "provider_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa399", - "plan_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398" - }, - "enabled": 1 - } -} -``` - -### Show Scheduled Operation ### -> **get** : /v1/{project_id}/scheduled_operations/{scheduled_operation_id} -#### Response JSON #### -```json -{"scheduled_operation": { - "id": "1a2c0c3d-f402-4cd8-b5db-82e85cb51fad", - "name": "My scheduled operation", - "description": "It will run everyday", - "operation_type": "protect", - "trigger_id": "23902b02-5666-4ee6-8dfe-962ac09c3995", - "operation_definition": { - "provider_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa399", - "plan_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398" - }, - "enabled": 1 - } -} -``` - -### Delete Scheduled Operation ### -> **delete** : /v1/{project_id}/scheduled_operations/{scheduled_operation_id} -#### Response JSON #### -```json -None -``` - ----------- - -## Restores ## - -### List Restores ### -> **get** : /v1/{project_id}/restores -#### Response JSON #### -```json -{ - "restores": [ - { - "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "project_id": "e486a2f49695423ca9c47e589b948108", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - 
"restore_target": "http://192.168.1.2/identity/", - "parameters": { - "username": "admin", - "password": "***" - }, - "status": "success" - } - ], - "restores_links": [ - { - "href": "/v1/{project_id}/restores?limit={limit_num}&marker=22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "rel": "next" - } - ] -} -``` - -### Create Restore ### -> **post** : /v1/{project_id}/restores -#### Request JSON #### -```json -{ - "restore": { - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "restore_target": "http://192.168.1.2/identity/", - "restore_auth": { - "type": "password", - "username": "admin", - "password": "secretadmin" - }, - "parameters": { - "OS::Cinder::Volume": { - }, - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": { - } - } - } -} -``` - -#### Response JSON #### -```json -{ - "restore": { - "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "project_id": "e486a2f49695423ca9c47e589b948108", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "restore_target": "http://192.168.1.2/identity/v3", - "restore_auth": { - "type": "password", - "username": "admin", - "password": "***" - }, - "parameters": { - "OS::Cinder::Volume": { - }, - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": { - } - }, - "status": "success" - } -} -``` - -### Show Restore ### -> **get** : /v1/{project_id}/restores/{restore_id} -#### Response JSON #### -```json -{ - "restore": { - "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "project_id": "e486a2f49695423ca9c47e589b948108", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "restore_target": "http://192.168.1.2/identity/", - "parameters": { - "username": "admin", - "password": "***" - }, - "status": "success" - } -} -``` - ----------- - -## Trigger ## - -### List Triggers ### -> **get** : /v1/{project_id}/triggers -#### Response JSON #### -```json -{"triggers": [ - {"trigger_info": { - "id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "type": "time", - "name": "My backup trigger", - "properties": { - "format": "crontab", - "pattern": "0 9 * * *", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "3600", - } - } - }, - ], - "triggers_links": "" -} -``` - -### Create Trigger ### -> **post** : /v1/{project_id}/triggers -#### Request JSON #### -```json -{"trigger_info": { - "name": "My backup trigger", - "type": "time", - "properties": { - "format": "crontab", - "pattern": "0 9 * * *", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "3600", - } - } -} -``` - -#### Response JSON #### -```json -{"trigger_info": { - "id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "type": "time", - "name": "My backup trigger", - "properties": { - "format": "crontab", - "pattern": "0 9 * * *", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "3600", - } - } -} -``` - -### Update Trigger ### -> **put** : /v1/{project_id}/triggers/{trigger_id} -#### Request JSON #### -```json -{"trigger_info": { - "properties": { - "format": "crontab", - "pattern": "0 10 * * *", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "3600", - } - } -} -``` - -#### Response JSON #### -```json -{"trigger_info": { - "id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "type": "time", - "name": "My backup trigger", - "properties": { - "format": "crontab", - "pattern": 
"0 10 * * *", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "3600", - } - } -} -``` - -### Show Trigger ### -> **get** : /v1/{project_id}/triggers/{trigger_id} -#### Response JSON #### -```json -{"trigger_info": { - "id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "type": "time", - "name": "My backup trigger", - "properties": { - "format": "crontab", - "pattern": "0 9 * * *", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "3600", - } - } -} -``` - -### Delete Trigger ### -> **delete** : /v1/{project_id}/triggers/{trigger_id} -#### Response JSON #### -```json -None -``` - ----------- - -## Operation_logs ## - -### List Operation_logs ### -> **get** : /v1/{project_id}/operation_logs -#### Response JSON #### -```json -{ - "operation_logs": [{ - "status": "deleted", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": null, - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T09:02:57.000000", - "started_at": "2017-07-28T09:02:41.000000", - "id": "f0aa664b-f385-4618-bc27-9e0116cceea7", - "extra_info": null, - "plan_id": "d58ffd3e-f64e-4b67-9bb0-b86d3483e7d0", - "scheduled_operation_id": null, - "operation_type": "protect" - }, - { - "status": "success", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": "7c0d396a-981b-4953-95f5-30382ddaa8bf", - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T08:58:08.000000", - "started_at": "2017-07-28T08:57:36.000000", - "id": "8736649d-857e-4637-923c-3bdb35edd74e", - "extra_info": null, - "plan_id": null, - "scheduled_operation_id": null, - "operation_type": "restore" - }, - { - "status": "available", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": null, - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T08:43:22.000000", - "started_at": "2017-07-28T08:42:02.000000", - "id": "7a16c731-0658-47dd-aa3b-98ee21830e23", - "extra_info": null, - "plan_id": "d58ffd3e-f64e-4b67-9bb0-b86d3483e7d0", - "scheduled_operation_id": null, - "operation_type": "protect" - }] -} -``` - -### Show operation_logs ### -> **get** : /v1/{project_id}/operation_logs/{operation_log_id} -#### Response JSON #### -```json -{ - "operation_log": { - "status": "available", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": null, - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T08:43:22.000000", - "started_at": "2017-07-28T08:42:02.000000", - "id": "7a16c731-0658-47dd-aa3b-98ee21830e23", - "extra_info": null, - "plan_id": "d58ffd3e-f64e-4b67-9bb0-b86d3483e7d0", - "scheduled_operation_id": null, - "operation_type": "protect" - } -} -``` - ----------- \ No newline at end of file diff --git a/doc/source/specs/api/class_diagram.pu b/doc/source/specs/api/class_diagram.pu deleted file mode 100644 index ceffc7db..00000000 --- a/doc/source/specs/api/class_diagram.pu +++ /dev/null @@ -1,109 +0,0 @@ -@startuml - -title "Karbor API model" - -class Protectable { - name: string - instances: []Resource - is_root: bool -} - -Protectable --> Resource: lists - -class Resource { - id: UUID - type: ResourceType - schema: JSONSchema - dependent_resources: []Resource -} - -class Trigger { -} - -class TimedTrigger extends Trigger { - -} - -class EventTrigger extends Trigger { - -} - -class Checkpoint { - id: UUID - tenant_id: 
    plan: ProtectionPlan
    status: string
    started_at: DateTime
}

Checkpoint *-> ProtectionPlan: stores a copy of

class AutomaticOperation {
    id: UUID
    name: string
    description: string
    tenant_id: UUID
}

class ScheduledOperation <<abstract>> extends AutomaticOperation {
    trigger: Trigger
}

ScheduledOperation *- Trigger: when should the operation trigger


class BackupPlan extends ScheduledOperation {
    protection_plan: ProtectionPlan
}

BackupPlan *--> ProtectionPlan

class DeleteCheckpoints extends ScheduledOperation {
    query: string
    protection_provider: ProtectionProvider
}


class ProtectionProvider {
    name: string
    description: string
    extended_info_schema: [ResourceType]JSONSchema
}

ProtectionProvider o-> Checkpoint: lists

class ProtectionPlan {
    id: UUID
    is_enabled: boolean
    name: string
    status: ePlanStatus
    resources: []Resource
    protection_provider: ProtectionProvider
    parameters: dict
}

ProtectionPlan "1" *--> "N" Resource: aggregates
ProtectionPlan -> ProtectionProvider
ProtectionPlan -> ePlanStatus

enum ePlanStatus {
    started
    suspended
}

class RestoreTarget {
    keystone_uri: URI
}

class Restore {
    id: UUID
    project_id: UUID
    target: RestoreTarget
    provider: ProtectionProvider
    checkpoint: Checkpoint
    started_at: string
}

Restore *-> RestoreTarget: restores to

@enduml
diff --git a/doc/source/specs/api/karbor_api.v1.yaml b/doc/source/specs/api/karbor_api.v1.yaml
deleted file mode 100644
index ba5bcfd9..00000000
--- a/doc/source/specs/api/karbor_api.v1.yaml
+++ /dev/null
@@ -1,2082 +0,0 @@
swagger: '2.0'
info:
  title: Karbor API
  description: Protect all you hold dear
  version: 0.99.0
host: api.karbor.nowhere.com
schemes:
  - https
basePath: /v1
produces:
  - application/json
paths:
  /{project_id}/providers:
    get:
      summary: Providers
      description: |
        The Providers endpoint returns information about the providers
        offered at a given service. All providers need to be configured
        first by the admin.
- parameters: - - $ref: '#/parameters/projectParam' - tags: - - Protection Provider - responses: - '200': - description: An array of providers - schema: - type: array - items: - $ref: '#/definitions/Provider' - examples: - application/json: { - "providers": [ - { - "id": "2220f8b1-975d-4621-a872-fa9afb43cb6c", - "name": "OS Infra Provider", - "description": "This provider uses OpenStack's own services (swift, cinder) as storage", - "extended_info_schema": { - "options_schema": { - "OS::Cinder::Volume": { - "required": [ - "backup_mode" - ], - "type": "object", - "properties": { - "backup_mode": { - "default": "auto", - "enum": [ - "full", - "incremental", - "auto" - ], - "type": "string", - "description": "The backup mode.", - "title": "Backup Mode" - } - }, - "title": "Cinder Protection Options" - } - }, - "saved_info_schema": { - "OS::Cinder::Volume": { - "required": [ - "name" - ], - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name for this backup.", - "title": "Name" - } - }, - "title": "Cinder Protection Saved Info" - } - }, - "restore_schema": { - "OS::Cinder::Volume": { - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "description": "The name of the restored volume.", - "title": "Restore Name" - } - }, - "title": "Cinder Protection Restore" - } - } - } - } - ], - "providers_links": [ - { - "href": "/v1/{project_id}/providers?limit={limit_num}&marker=cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "rel": "next" - } - ] - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - /{project_id}/providers/{provider_id}: - get: - summary: Provider - description: | - The Providers endpoint returns information about a specific provider. - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/provider_idParam' - tags: - - Protection Provider - responses: - '200': - description: A protection provider. - schema: - $ref: '#/definitions/Provider' - examples: - application/json: { - "provider": { - "id": "2220f8b1-975d-4621-a872-fa9afb43cb6c", - "name": "OS Infra Provider", - "description": "This provider uses OpenStack's own services (swift, cinder) as storage", - "extended_info_schema": { - "options_schema": { - "OS::Cinder::Volume": { - "required": [ - "backup_mode" - ], - "type": "object", - "properties": { - "backup_mode": { - "default": "auto", - "enum": [ - "full", - "incremental", - "auto" - ], - "type": "string", - "description": "The backup mode.", - "title": "Backup Mode" - } - }, - "title": "Cinder Protection Options" - } - }, - "saved_info_schema": { - "OS::Cinder::Volume": { - "required": [ - "name" - ], - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name for this backup.", - "title": "Name" - } - }, - "title": "Cinder Protection Saved Info" - } - }, - "restore_schema": { - "OS::Cinder::Volume": { - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "description": "The name of the restored volume.", - "title": "Restore Name" - } - }, - "title": "Cinder Protection Restore" - } - } - } - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - - /{project_id}/providers/{provider_id}/checkpoints: - get: - summary: List checkpoints - description: | - The checkpoints endpoint returns information about the checkpoints - offered at a given provider. 
- parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/provider_idParam' - - $ref: '#/parameters/statusFilterParam' - - $ref: '#/parameters/sortParam' - - $ref: '#/parameters/limitParam' - - $ref: '#/parameters/markerParam' - tags: - - Protection Provider - - Checkpoint - responses: - '200': - description: An array of checkpoints - schema: - type: array - items: - $ref: '#/definitions/Checkpoint' - examples: - application/json: { - "checkpoints": [ - { - "id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "project_id": "e486a2f49695423ca9c47e589b948108", - "status": "available", - "protection_plan": { - "id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb", - "name": "My 3 tier application", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume" - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume" - } - ] - }, - "resource_graph": "[{'0x3': ['OS::Cinder::Volume', '33b6bb0b-1157-4e66-8553-1c9e14b1c7ba', 'Data volume'], '0x2': ['OS::Cinder::Volume', '25336116-f38e-4c22-81ad-e9b7bd71ba51', 'System volume'], '0x1': ['OS::Nova::Server', 'cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01', 'App server'], '0x0': ['OS::Glance::Image', '99777fdd-8a5b-45ab-ba2c-52420008103f', 'cirros-0.3.4-x86_64-uec']}, [['0x1', ['0x0']]]]" - } - ], - "checkpoints_links": [ - { - "href": "/v1/{project_id}/checkpoints?limit={limit_num}&marker=dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "rel": "next" - } - ] - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - post: - summary: Checkpoints - description: | - Execute the protect operation for the specified plan and create a - checkpoint at a given provider. 
- parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/provider_idParam' - - name: checkpoint - in: body - required: true - schema: - $ref: '#/definitions/CheckpointCreateBody' - tags: - - Protection Provider - - Checkpoint - responses: - '200': - description: Checkpoint created - schema: - $ref: '#/definitions/Checkpoint' - examples: - application/json: { - "checkpoint": { - "id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "project_id": "e486a2f49695423ca9c47e589b948108", - "status": "available", - "protection_plan": { - "id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb", - "name": "My 3 tier application", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume" - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume" - } - ] - }, - "resource_graph": "[{'0x3': ['OS::Cinder::Volume', '33b6bb0b-1157-4e66-8553-1c9e14b1c7ba', 'Data volume'], '0x2': ['OS::Cinder::Volume', '25336116-f38e-4c22-81ad-e9b7bd71ba51', 'System volume'], '0x1': ['OS::Nova::Server', 'cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01', 'App server'], '0x0': ['OS::Glance::Image', '99777fdd-8a5b-45ab-ba2c-52420008103f', 'cirros-0.3.4-x86_64-uec']}, [['0x1', ['0x0']]]]" - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - /{project_id}/providers/{provider_id}/checkpoints/{checkpoint_id}: - get: - summary: Get the specified checkpoint - description: | - The checkpoints endpoint returns information about the specified - checkpoint offered at a given provider. 
- parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/provider_idParam' - - $ref: '#/parameters/checkpoint_idParam' - tags: - - Protection Provider - - Checkpoint - responses: - '200': - description: The checkpoint information - schema: - $ref: '#/definitions/Checkpoint' - examples: - application/json: { - "checkpoint": { - "id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "project_id": "e486a2f49695423ca9c47e589b948108", - "status": "available", - "protection_plan": { - "id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb", - "name": "My 3 tier application", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume" - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume" - } - ] - }, - "resource_graph": "[{'0x3': ['OS::Cinder::Volume', '33b6bb0b-1157-4e66-8553-1c9e14b1c7ba', 'Data volume'], '0x2': ['OS::Cinder::Volume', '25336116-f38e-4c22-81ad-e9b7bd71ba51', 'System volume'], '0x1': ['OS::Nova::Server', 'cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01', 'App server'], '0x0': ['OS::Glance::Image', '99777fdd-8a5b-45ab-ba2c-52420008103f', 'cirros-0.3.4-x86_64-uec']}, [['0x1', ['0x0']]]]" - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - delete: - summary: Delete checkpoint - description: | - The checkpoint endpoint deletes a checkpoint - at a given provider. - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/provider_idParam' - - $ref: '#/parameters/checkpoint_idParam' - tags: - - Protection Provider - - Checkpoint - responses: - '200': - description: Checkpoint deleted - examples: - application/json: {} - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - - /{project_id}/plans: - get: - summary: Get protection plans - description: | - The Plans endpoint returns information about the protection plans - offered for the given project. 
- parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/nameFilterParam' - - $ref: '#/parameters/statusFilterParam' - - $ref: '#/parameters/sortParam' - - $ref: '#/parameters/limitParam' - - $ref: '#/parameters/markerParam' - tags: - - Project API - - Protection Plan - responses: - '200': - description: An array of protection plans - schema: - type: array - items: - $ref: '#/definitions/ProtectionPlan' - examples: - application/json: { - "plans": [ - { - "id": "9e5475d2-6425-4986-9136-a4f09642297f", - "name": "My 3 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "status": "suspended", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } - ], - "plans_links": [ - { - "href": "/v1/{project_id}/plans?limit={limit_num}&marker=9e5475d2-6425-4986-9136-a4f09642297f", - "rel": "next" - } - ] - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - post: - summary: Create a plan - description: | - Create a new plan. The operation will create a new revision for - the plan. 
- tags: - - Project API - - Protection Plan - parameters: - - $ref: '#/parameters/projectParam' - - name: plan - in: body - required: true - schema: - $ref: '#/definitions/ProtectionPlan' - responses: - '200': - description: The new created plan information - schema: - $ref: '#/definitions/ProtectionPlan' - examples: - application/json: { - "plan": { - "id": "9e5475d2-6425-4986-9136-a4f09642297f", - "name": "My 3 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "status": "suspended", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - /{project_id}/plans/{plan_id}: - get: - summary: Protection Plan - description: | - The Plan endpoint returns information about a specific plan. - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/plan_idParam' - tags: - - Project API - - Protection Plan - responses: - '200': - description: show the protection plan - schema: - $ref: '#/definitions/ProtectionPlan' - examples: - application/json: { - "plan": { - "id": "9e5475d2-6425-4986-9136-a4f09642297f", - "name": "My 3 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "status": "suspended", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - put: - summary: Protection Plan - description: | - Update a specific plan. 
- parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/plan_idParam' - - name: plan - in: body - schema: - $ref: '#/definitions/ProtectionPlanUpdateBody' - tags: - - Project API - - Protection Plan - responses: - '200': - description: update the protection plan - schema: - $ref: '#/definitions/ProtectionPlan' - examples: - application/json: { - "plan": { - "id": "9e5475d2-6425-4986-9136-a4f09642297f", - "name": "My 1 tier application", - "resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - }, - { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "App server" - }, - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "status": "started", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "parameters": { - "OS::Nova::Server": { - "backup_name": "os" - }, - "OS::Nova::Server#cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01": { - "backup_name": "crash" - }, - "OS::Cinder::Volume": { - "backup_name": "os" - }, - "OS::Cinder::Volume#33b6bb0b-1157-4e66-8553-1c9e14b1c7ba": { - "backup_name": "crash" - } - } - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - delete: - summary: Protection Plan - description: | - The Plan endpoint deletes a specific plan. - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/plan_idParam' - tags: - - Project API - - Protection Plan - responses: - '200': - description: Protection plan deleted - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - - /{project_id}/protectables: - get: - summary: Protectables - description: | - Return all the available protectable types. - parameters: - - $ref: '#/parameters/projectParam' - tags: - - Protectable - responses: - '200': - description: The available protectable types - schema: - type: array - items: - $ref: '#/definitions/ProtectableType' - examples: - application/json: { - "protectable_type":[ - "OS::Keystone::Project", - "OS::Cinder::Volume", - "OS::Cinder::ConsistencyGroup", - "OS::Glance::Image", - "OS::Nova::Server" - ] - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - /{project_id}/protectables/{protectable_type}: - get: - summary: Protectables - description: | - Return the information of a given protectable type. - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/protectable_typeParam' - tags: - - Protectable - responses: - '200': - description: The protectable information - schema: - $ref: '#/definitions/ProtectableInfo' - examples: - application/json: { - "protectable_type": { - "name": "OS::Nova::Server", - "dependent_types": [ - "OS::Cinder::Volume", - "OS::Glance::Image" - ] - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - /{project_id}/protectables/{protectable_type}/instances: - get: - summary: Resource Instances - description: | - Return all the available instances for the given protectable type. 
- parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/protectable_typeParam' - - $ref: '#/parameters/nameFilterParam' - - $ref: '#/parameters/sortParam' - - $ref: '#/parameters/limitParam' - - $ref: '#/parameters/markerParam' - - $ref: '#/parameters/parametersParam' - tags: - - Protectable - - Resource - responses: - '200': - description: The available instances for the protectable type. - schema: - type: array - items: - $ref: '#/definitions/Resource' - examples: - application/json: { - "instances": [ - { - "id": "25336116-f38e-4c22-81ad-e9b7bd71ba51", - "type": "OS::Cinder::Volume", - "name": "System volume", - "extra_info": { - "availability_zone": "az1" - } - }, - { - "id": "33b6bb0b-1157-4e66-8553-1c9e14b1c7ba", - "type": "OS::Cinder::Volume", - "name": "Data volume", - "extra_info": { - "availability_zone": "az1" - } - } - ], - "instances_links": [ - { - "href": "/v1/{project_id}/instances?limit=1&marker=cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "rel": "next" - } - ] - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - /{project_id}/protectables/{protectable_type}/instances/{resource_id}: - get: - summary: Resource Instance - description: | - Return information about a specific instance and its immediate dependencies. - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/protectable_typeParam' - - $ref: '#/parameters/resource_idParam' - - $ref: '#/parameters/parametersParam' - tags: - - Protectable - - Resource - responses: - '200': - description: Information about the instance. - schema: - $ref: '#/definitions/Resource' - examples: - application/json: { - "instance": { - "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01", - "type": "OS::Nova::Server", - "name": "My VM", - "dependent_resources": [ - { - "id": "99777fdd-8a5b-45ab-ba2c-52420008103f", - "type": "OS::Glance::Image", - "name": "cirros-0.3.4-x86_64-uec" - } - ] - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - - /{project_id}/scheduled_operations: - get: - summary: Scheduled Operations - description: | - Scheduled operations are operations that will be executed when - a specific trigger is triggered. - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/nameFilterParam' - - $ref: '#/parameters/scheduled_operation_typeFilterParam' - - $ref: '#/parameters/trigger_idFilterParam' - - $ref: '#/parameters/scheduled_operation_defFilterParam' - - $ref: '#/parameters/sortParam' - - $ref: '#/parameters/limitParam' - - $ref: '#/parameters/markerParam' - tags: - - Project API - - Scheduled Operation - responses: - '200': - description: An array of scheduled operations - schema: - type: array - items: - $ref: '#/definitions/ScheduledOperation' - examples: - application/json: { - "operations": [ - { - "scheduled_operation": { - "id": "1a2c0c3d-f402-4cd8-b5db-82e85cb51fad", - "name": "My scheduled operation", - "description": "It will run everyday", - "operation_type": "protect", - "trigger_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "operation_definition": { - "provider_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa399", - "plan_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398" - }, - "enabled": 1 - } - }, - ], - "operations_links": "" - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - post: - summary: Scheduled operation - description: | - Create a new scheduled operation. 
- tags: - - Project API - - Scheduled Operation - parameters: - - $ref: '#/parameters/projectParam' - - name: scheduled_operation - in: body - required: true - schema: - $ref: '#/definitions/ScheduledOperationCreateBody' - responses: - '200': - description: The new scheduled operation - schema: - $ref: '#/definitions/ScheduledOperation' - examples: - application/json: { - "scheduled_operation": { - "id": "1a2c0c3d-f402-4cd8-b5db-82e85cb51fad", - "name": "My scheduled operation", - "description": "It will run everyday", - "operation_type": "protect", - "trigger_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "operation_definition": { - "provider_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa399", - "plan_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - }, - "enabled": 1 - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - /{project_id}/scheduled_operations/{scheduled_operation_id}: - get: - summary: Scheduled Operation - description: | - Get the specified scheduled operation information. - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/scheduled_operation_idParam' - tags: - - Project API - - Scheduled Operation - responses: - '200': - description: A scheduled operation - schema: - $ref: '#/definitions/ScheduledOperation' - examples: - application/json: { - "scheduled_operation": { - "id": "1a2c0c3d-f402-4cd8-b5db-82e85cb51fad", - "name": "My scheduled operation", - "description": "It will run everyday", - "operation_type": "protect", - "trigger_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "operation_definition": { - "provider_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa399", - "plan_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398" - }, - "enabled": 1 - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - delete: - summary: Scheduled operation - description: | - Delete a scheduled operation. - tags: - - Project API - - Scheduled Operation - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/scheduled_operation_idParam' - responses: - '200': - description: Scheduled operation deleted - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - - /{project_id}/operation_logs/: - get: - summary: Operation log. - description: | - Get the operation logs information about different operations(protect, delete, restore) - in karbor protection service. 
- parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/checkpoint_idFilterParam' - - $ref: '#/parameters/plan_idFilterParam' - - $ref: '#/parameters/restore_idFilterParam' - - $ref: '#/parameters/statusFilterParam' - - $ref: '#/parameters/sortParam' - - $ref: '#/parameters/limitParam' - - $ref: '#/parameters/markerParam' - tags: - - Project API - - Scheduled Operation - responses: - '200': - description: An array of operation logs - schema: - type: array - items: - $ref: '#/definitions/OperationLog' - examples: - application/json: { - "operation_logs": [{ - "status": "deleted", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": null, - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T09:02:57.000000", - "started_at": "2017-07-28T09:02:41.000000", - "id": "f0aa664b-f385-4618-bc27-9e0116cceea7", - "extra_info": null, - "plan_id": "d58ffd3e-f64e-4b67-9bb0-b86d3483e7d0", - "scheduled_operation_id": null, - "operation_type": "protect" - }, - { - "status": "success", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": "7c0d396a-981b-4953-95f5-30382ddaa8bf", - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T08:58:08.000000", - "started_at": "2017-07-28T08:57:36.000000", - "id": "8736649d-857e-4637-923c-3bdb35edd74e", - "extra_info": null, - "plan_id": null, - "scheduled_operation_id": null, - "operation_type": "restore" - }, - { - "status": "available", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": null, - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T08:43:22.000000", - "started_at": "2017-07-28T08:42:02.000000", - "id": "7a16c731-0658-47dd-aa3b-98ee21830e23", - "extra_info": null, - "plan_id": "d58ffd3e-f64e-4b67-9bb0-b86d3483e7d0", - "scheduled_operation_id": null, - "operation_type": "protect" - }] - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - /{project_id}/operation_logs/{operation_log_id}: - get: - summary: Operation log. - description: | - Get the information about an operation log. - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/operation_log_idParam' - tags: - - Project API - - Scheduled Operation - responses: - '200': - description: The operation log information. - schema: - $ref: '#/definitions/OperationLog' - examples: - application/json: { - "operation_log": { - "status": "available", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "restore_id": null, - "checkpoint_id": "dbc459ff-2ac6-44fa-ba15-89350d7ebd43", - "error_info": null, - "ended_at": "2017-07-28T08:43:22.000000", - "started_at": "2017-07-28T08:42:02.000000", - "id": "7a16c731-0658-47dd-aa3b-98ee21830e23", - "extra_info": null, - "plan_id": "d58ffd3e-f64e-4b67-9bb0-b86d3483e7d0", - "scheduled_operation_id": null, - "operation_type": "protect" - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - - /{project_id}/restores: - get: - summary: Restores - description: | - List all restores finished and in progress, triggered by - a given project. 
- parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/statusFilterParam' - - $ref: '#/parameters/sortParam' - - $ref: '#/parameters/limitParam' - - $ref: '#/parameters/markerParam' - tags: - - Project API - - Restore - responses: - '200': - description: An array of restores - schema: - type: array - items: - $ref: '#/definitions/Restore' - examples: - application/json: { - "restores": [ - { - "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "project_id": "e486a2f49695423ca9c47e589b948108", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "restore_target": "http://192.168.1.2/identity/", - "parameters": { - "OS::Cinder::Volume": { - }, - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": { - } - }, - "status": "success" - } - ], - "restores_links": [ - { - "href": "/v1/{project_id}/restores?limit={limit_num}&marker=22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "rel": "next" - } - ] - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - post: - summary: Restores - description: | - Start a restore. - tags: - - Project API - - Restore - parameters: - - $ref: '#/parameters/projectParam' - - name: restore - in: body - required: true - schema: - $ref: '#/definitions/RestoreCreateBody' - responses: - '200': - description: The new started restore information - schema: - $ref: '#/definitions/Restore' - examples: - application/json: { - "restore": { - "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "project_id": "e486a2f49695423ca9c47e589b948108", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "restore_target": "http://192.168.1.2/identity/", - "parameters": { - "OS::Cinder::Volume": { - }, - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": { - } - }, - "status": "success" - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - /{project_id}/restores/{restore_id}: - get: - summary: Restores - description: | - Get the information of a given restore. - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/restore_idParam' - tags: - - Project API - - Restore - responses: - '200': - description: The restore information - schema: - $ref: '#/definitions/Restore' - examples: - application/json: { - "restore": { - "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "project_id": "e486a2f49695423ca9c47e589b948108", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "restore_target": "http://192.168.1.2/identity/", - "parameters": { - "OS::Cinder::Volume": { - }, - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": { - } - }, - "status": "success" - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - - /{project_id}/triggers: - get: - summary: Triggers - description: | - List all of the triggers created by a given project. 
- parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/nameFilterParam' - - $ref: '#/parameters/typeFilterParam' - - $ref: '#/parameters/propertiesFilterParam' - - $ref: '#/parameters/sortParam' - - $ref: '#/parameters/limitParam' - - $ref: '#/parameters/markerParam' - tags: - - Project API - - Trigger - responses: - '200': - description: An array of triggers - schema: - type: array - items: - $ref: '#/definitions/Trigger' - examples: - application/json: { - "triggers": [{ - "trigger_info": { - "id": "cc1a-4516-9435-0ebb13caa398", - "name": "My backup trigger", - "type": "time", - "properties": { - "format": "crontab", - "pattern": "0 9 * * *", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "60", - } - } - }, - ], - "triggers_links": "" - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - post: - summary: Trigger - description: | - Create a new scheduled operation. - tags: - - Project API - - Trigger - parameters: - - $ref: '#/parameters/projectParam' - - name: trigger_info - in: body - required: true - schema: - $ref: '#/definitions/TriggerCreateBody' - responses: - '200': - description: The new created trigger - schema: - $ref: '#/definitions/Trigger' - examples: - application/json: { - "trigger_info": { - "id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "name": "My backup trigger", - "type": "time", - "properties": { - "format": "crontab", - "pattern": "0 9 * * *", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "60", - } - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - /{project_id}/triggers/{trigger_id}: - put: - summary: Trigger - description: | - Update the specified trigger created by a given project. - tags: - - Project API - - Trigger - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/trigger_idParam' - - name: trigger_info - in: body - required: true - schema: - $ref: '#/definitions/TriggerUpdateBody' - responses: - '200': - description: The updated trigger - schema: - $ref: '#/definitions/Trigger' - examples: - application/json: { - "trigger_info": { - "id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "name": "My backup trigger", - "type": "time", - "properties": { - "format": "crontab", - "pattern": "0 10 * * *", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "60", - } - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - get: - summary: Triggers - description: | - Get the specified trigger created by a given project. - parameters: - - $ref: '#/parameters/projectParam' - - $ref: '#/parameters/trigger_idParam' - tags: - - Project API - - Trigger - responses: - '200': - description: The trigger information - schema: - $ref: '#/definitions/Trigger' - examples: - application/json: { - "trigger_info": { - "id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - "name": "My backup trigger", - "type": "time", - "properties": { - "format": "crontab", - "pattern": "0 9 * * *", - "start_time": "2015-12-17T08:30:00", - "end_time": "2016-03-17T08:30:00", - "window": "60", - } - } - } - default: - description: Unexpected error - schema: - $ref: '#/definitions/Error' - delete: - summary: Trigger - description: | - Delete a trigger created by a given project. 
      parameters:
        - $ref: '#/parameters/projectParam'
        - $ref: '#/parameters/trigger_idParam'
      tags:
        - Project API
        - Trigger
      responses:
        '200':
          description: Trigger deleted
        '424':
          description: Trigger is being used by a scheduled operation
          schema:
            $ref: '#/definitions/Error'
          examples:
            application/json: {
              "code": 424,
              "message": "Trigger is being used by one or more operations"
            }
        default:
          description: Unexpected error
          schema:
            $ref: '#/definitions/Error'

definitions:
  Provider:
    type: object
    required: [ name ]
    properties:
      id:
        readOnly: true
        type: string
        format: UUID
        description: |
          Unique identifier representing a specific protection provider.
      name:
        type: string
        description: Display name of provider.
      description:
        type: string
        description: Description of provider.
      extended_info_schema:
        $ref: '#/definitions/ExtendedInfoSchema'

  ExtendedInfoSchema:
    type: object
    required: [ options_schema, saved_info_schema, restore_schema ]
    properties:
      options_schema:
        type: object
        description: |
          A mapping between a resource type and a JSON Schema that defines the options for that type.
      saved_info_schema:
        type: object
        description: |
          A mapping between a resource type and a JSON Schema that defines the saved info fields for that type.
      restore_schema:
        type: object
        description: |
          A mapping between a resource type and a JSON Schema that defines the restore options for that type.

  Checkpoint:
    type: object
    properties:
      id:
        readOnly: true
        type: string
        format: UUID
        description: |
          Unique identifier representing a specific checkpoint.
      project_id:
        type: string
        format: UUID
      protection_plan:
        readOnly: true
        $ref: '#/definitions/ProtectionPlan'
      status:
        readOnly: true
        type: string

  CheckpointCreateBody:
    type: object
    discriminator: plan_id
    required: [ plan_id ]
    properties:
      plan_id:
        readOnly: true
        type: string
        format: UUID
        description: Unique identifier representing a specific protection plan.
      extra_info:
        readOnly: true
        type: object
        description: The extra info parameter of the checkpoint.

  Resource:
    type: object
    properties:
      id:
        readOnly: true
        type: string
      type:
        $ref: '#/definitions/ProtectableType'
        readOnly: true
      name:
        readOnly: true
        type: string

  ProtectionPlan:
    type: object
    required: [ name, provider_id, resources ]
    properties:
      id:
        readOnly: true
        type: string
        format: UUID
        description: Unique identifier representing a specific protection plan.
      name:
        type: string
        description: Display name of plan.
      description:
        type: string
        description: Description of the plan.
      resources:
        type: array
        items:
          $ref: '#/definitions/Resource'
      status:
        type: string
        description: The status of the plan. It's either 'suspended' or 'started'.
      provider_id:
        type: string
        format: UUID
        description: |
          Unique identifier representing a specific protection provider that
          will store checkpoints for this protection plan.
      parameters:
        type: object
        format: dict

  ProtectionPlanUpdateBody:
    type: object
    properties:
      name:
        type: string
        description: Display name of plan.
      resources:
        type: array
        items:
          $ref: '#/definitions/Resource'
      status:
        type: string
        description: The status of the plan. It's either 'suspended' or 'started'.

  ProtectableType:
    type: string
    format: Heat Type String
    example: "OS::Nova::Server"
    description: |
      Name of the resource type.
When available the types that are defined by Heat - are used. - - ProtectableInfo: - type: object - properties: - name: - $ref: '#/definitions/ProtectableType' - dependent_types: - type: array - description: | - List of types that might depend on this type. For example an - "OS::Nova::Server" has "OS::Cinder::Volume" as a dependent type. - items: - $ref: '#/definitions/ProtectableType' - - OperationDefinition: - type: object - discriminator: type - required: [ type ] - properties: - id: - type: string - format: UUID - description: | - Unique identifier representing a specific operation definition. - type: - type: string - description: | - Type of the operation. This defines what kind of operation this - object defines the arguments for. - - ProtectOperationDefinition: - description: | - Operation definition for protect operation. - allOf: - - $ref: '#/definitions/OperationDefinition' - - type: object - properties: - protection_plan_id: - type: string - format: UUID - parameters: - type: object - format: dict - required: [ protection_plan_id ] - - DeleteOperationDefinition: - description: | - Operation definition for delete operation. - allOf: - - $ref: '#/definitions/OperationDefinition' - - type: object - properties: - checkpoint_path: - type: string - provider_id: - type: string - format: UUID - required: [ checkpoint_path, provider_id ] - - StartOperationDefinition: - description: | - Operation definition for start operation. - allOf: - - $ref: '#/definitions/OperationDefinition' - - type: object - properties: - protection_plan_id: - type: string - format: UUID - required: [ protection_plan_id ] - - SuspendOperationDefinition: - description: | - Operation definition for suspend operation. - allOf: - - $ref: '#/definitions/OperationDefinition' - - type: object - properties: - protection_plan_id: - type: string - format: UUID - required: [ protection_plan_id ] - - RestoreOperationDefinition: - description: | - Operation definition for restore operation. - allOf: - - $ref: '#/definitions/OperationDefinition' - - type: object - properties: - checkpoint_id: - type: string - format: UUID - provider_id: - type: string - format: UUID - restore_target: - type: string - format: UUID - restore_auth: - type: object - format: dict - parameters: - type: object - format: dict - required: [ checkpoint_id, provider_id ] - - ScheduledOperation: - type: object - properties: - id: - type: string - format: UUID - description: | - Unique identifier representing a specific scheduled operation. - name: - type: string - description: Display name of scheduled operation. - description: - type: string - description: Description about the scheduled operation. - operation_type: - type: string - trigger_id: - type: string - format: UUID - operation_definition: - type: object - description: | - Supply parameters for the operation type. - enabled: - type: integer - - ScheduledOperationCreateBody: - type: object - properties: - name: - type: string - description: Display name of scheduled operation. - description: - type: string - description: Description about the scheduled operation. - operation_type: - type: string - trigger_id: - type: string - format: UUID - operation_definition: - type: object - description: | - Supply parameters for the operation type. - - OperationStatus: - type: object - properties: - status: - type: string - description: - type: string - - OperationLog: - type: object - properties: - id: - type: string - format: UUID - description: | - Unique identifier representing a specific operation log. 
      scheduled_operation_id:
        type: string
        format: UUID
        description: |
          ID of the scheduled operation that defines this log.
      started_at:
        type: string
        description: When this operation run started, in ISO 8601 format.
      ended_at:
        type: string
        description: When this operation run ended, in ISO 8601 format.
      status:
        type: string
        description: Status of the operation; can be running, finished, or failed.
      error:
        type: string
        description: If the status is 'failed' this will contain the reason for the failure.
      entries:
        type: array
        description: Complete log of the entries for the operation.
        items:
          $ref: '#/definitions/OperationLogEntry'

  OperationLogEntry:
    type: object
    properties:
      timestamp:
        type: string
        description: When this log line was emitted, in ISO 8601 format.
      message:
        type: string
        description: Message containing information about an event.

  Trigger:
    type: object
    discriminator: type
    required: [ type ]
    properties:
      id:
        type: string
        format: UUID
        description: |
          Unique identifier representing a specific trigger.
      name:
        type: string
        description: Display name of trigger.
      type:
        type: string
        description: |
          Type of the trigger. This defines what kind of trigger this
          object defines the arguments for.
      properties:
        type: object
        description: |
          Trigger properties. Supply parameters for the trigger type.

  TimeTrigger:
    description: |
      Trigger definition for time trigger.
    allOf:
      - $ref: '#/definitions/Trigger'
      - type: object
        properties:
          properties:
            type: object
            required: [ start_time, trigger_window, format ]
            properties:
              format:
                type: string
                enum: [ crontab ]
              start_time:
                type: string
              trigger_window:
                type: string
                format: xsd:duration

  TriggerCreateBody:
    type: object
    discriminator: type
    required: [ type ]
    properties:
      name:
        type: string
        description: Display name of trigger.
      type:
        type: string
        description: |
          Type of the trigger. This defines what kind of trigger this
          object defines the arguments for.
      properties:
        type: object
        description: |
          Trigger properties. Supply parameters for the trigger type.

  TriggerUpdateBody:
    type: object
    discriminator: type
    properties:
      name:
        type: string
        description: Display name of trigger.
      properties:
        type: object
        description: |
          Trigger properties. Supply parameters for the trigger type.

  Restore:
    type: object
    properties:
      id:
        type: string
        format: UUID
        description: |
          Unique identifier representing a specific restore.
      project_id:
        type: string
        format: UUID
      provider_id:
        type: string
        format: UUID
      checkpoint_id:
        type: string
        format: UUID
      restore_target:
        type: string
        format: url
      parameters:
        type: object
        format: dict
      status:
        type: string

  RestoreCreateBody:
    type: object
    required: [ provider_id, checkpoint_id, restore_target, parameters ]
    properties:
      provider_id:
        type: string
        format: UUID
      checkpoint_id:
        type: string
        format: UUID
      restore_target:
        type: string
        format: url
      restore_auth:
        type: object
        format: dict
      parameters:
        type: object
        format: dict
        properties:
          username:
            type: string
          password:
            type: string

  Error:
    type: object
    properties:
      code:
        type: integer
        format: int32
      message:
        type: string
      fields:
        type: string

parameters:
  projectParam:
    name: project_id
    in: path
    description: |
      Specifies the ID of the project that owns this entity.
    required: true
    type: string
    format: uuid

  sortParam:
    name: sort
    in: query
    description: |
      Comma-separated list of sort keys and optional sort directions in the
      form of `<key>[:<direction>]`. A valid direction is asc
      (ascending) or desc (descending).
    required: false
    type: string

  limitParam:
    name: limit
    in: query
    description: |
      Requests a specified page size of returned items from the query.
      Returns a number of items up to the specified limit value.
      Use the limit parameter to make an initial limited request and use the
      ID of the last-seen item from the response as the marker parameter value
      in a subsequent limited request.
    type: integer
    format: int64

  markerParam:
    name: marker
    in: query
    description: |
      Specifies the ID of the last-seen item. Use the limit parameter to make
      an initial limited request and use the ID of the last-seen item from the
      response as the marker parameter value in a subsequent limited request.
    type: string

  parametersParam:
    name: parameters
    in: query
    description: |
      The parameters field for protectable instances query.
    type: string

  provider_idParam:
    name: provider_id
    in: path
    description: id of the provider.
    required: true
    type: string
    format: uuid

  plan_idParam:
    name: plan_id
    in: path
    description: id of the plan.
    required: true
    type: string
    format: uuid

  checkpoint_idParam:
    name: checkpoint_id
    in: path
    description: id of the checkpoint.
    required: true
    type: string
    format: uuid

  trigger_idParam:
    name: trigger_id
    in: path
    description: id of the trigger.
    required: true
    type: string
    format: uuid

  resource_idParam:
    name: resource_id
    in: path
    description: id of the resource.
    required: true
    type: string
    format: uuid

  restore_idParam:
    name: restore_id
    in: path
    description: id of the restore.
    required: true
    type: string
    format: uuid

  scheduled_operation_idParam:
    name: scheduled_operation_id
    in: path
    description: id of the scheduled operation.
    required: true
    type: string
    format: uuid

  operation_log_idParam:
    name: operation_log_id
    in: path
    description: id of the operation log.
    required: true
    type: string
    format: uuid

  nameFilterParam:
    name: name
    in: query
    description: name of the entity. Could be a regex pattern.
    required: false
    type: string
    format: regex

  typeFilterParam:
    name: type
    in: query
    description: type of the entity.
    required: false
    type: string

  statusFilterParam:
    name: status
    in: query
    description: status of the entity.
    required: false
    type: string

  propertiesFilterParam:
    name: properties
    in: query
    description: property of the entity. Could be a regex pattern.
    required: false
    type: string
    format: regex

  scheduled_operation_typeFilterParam:
    name: operation_type
    in: query
    description: type of the scheduled operation.
    required: false
    type: string
    format: regex

  scheduled_operation_defFilterParam:
    name: operation_definition
    in: query
    description: definition of the scheduled operation. Could be a regex pattern.
    required: false
    type: string
    format: regex

  trigger_idFilterParam:
    name: trigger_id
    in: query
    description: id of the trigger.
    required: false
    type: string
    format: uuid

  protectable_typeParam:
    name: protectable_type
    in: path
    description: the resource type.
    required: true
    type: string
    format: Heat Type String
diff --git a/doc/source/specs/available_protectables.pu b/doc/source/specs/available_protectables.pu
deleted file mode 100644
index b74b3948..00000000
--- a/doc/source/specs/available_protectables.pu
+++ /dev/null
@@ -1,26 +0,0 @@
@startuml

title Karbor Protectables

hide circle

legend top
This file contains the dependencies between protectables in the default
distribution of Karbor. As with inheritance, the arrows point to the parent:
each Protectable declares which types it depends on, so the child is
responsible for the connection.
endlegend

class OS::Glance::Image extends OS::Nova::Server, OS::Keystone::Project

class OS::Cinder::Volume extends OS::Nova::Server, OS::Keystone::Project

class OS::Nova::Server extends OS::Keystone::Project

class OS::Keystone::Project

class OS::Neutron::Topology extends OS::Keystone::Project

class OS::Manila::Share extends OS::Keystone::Project

@enduml
diff --git a/doc/source/specs/bank-plugin-lease.rst b/doc/source/specs/bank-plugin-lease.rst
deleted file mode 100644
index c42b28c3..00000000
--- a/doc/source/specs/bank-plugin-lease.rst
+++ /dev/null
@@ -1,178 +0,0 @@
..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

=================
Bank Plugin Basic
=================
The Bank Plugin is a component of karbor (an OpenStack project providing a
data protection service) which is responsible for executing CRUD actions
against the bank.

The bank is a backend (such as swift) which is used to store the metadata and
data of protection plans. Here, we take swift as an example bank
implementation.

******
leases
******
Karbor creates a checkpoint when protecting a protection plan. This checkpoint
carries a status, which is an enum: protecting, available, restoring, deleted,
etc.

The status is used by the karbor API layer to control user access to a
checkpoint.

With the 'protecting' status, there are two cases which we cannot tell apart:

1. The protection service is working and the 'protecting' protection plans are
   being executed;

2. The protection service has crashed, so the 'protecting' protection plans
   are actually zombies, and their checkpoints are zombies too;

In the second case, we need a garbage collection component (GC) to clean up
those zombie checkpoints.

In order to tell whether a checkpoint is a zombie or not, we introduce a lease
mechanism based on the bank plugin.

Here, we take swift as an example. The lease is stored as an object in swift
with the characteristic that it is automatically deleted when it expires.
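For illustration, the client side of this could look roughly like the
following sketch. It assumes python-swiftclient, a ``leases`` container, and
the configurable windows described below; none of these names are mandated by
karbor.

.. code-block:: python

    import time
    import uuid

    LEASES_CONTAINER = 'leases'   # assumed container for lease objects
    EXPIRE_WINDOW = 600           # seconds; configurable
    RENEW_WINDOW = 120            # seconds; configurable, < EXPIRE_WINDOW


    class LeaseClient(object):
        """Maintains one auto-expiring lease object per bank plugin."""

        def __init__(self, conn):
            self.conn = conn                  # a swiftclient.client.Connection
            # The spec derives owner_id from sha256(hostname + start time);
            # a random uuid is used here just to keep the sketch short.
            self.owner_id = uuid.uuid4().hex
            self.expire_time = 0

        def acquire_lease(self):
            # Swift deletes the object EXPIRE_WINDOW seconds after the last
            # write, so a crashed owner's lease disappears by itself.
            self.conn.put_object(
                LEASES_CONTAINER, self.owner_id, contents=b'',
                headers={'X-Delete-After': str(EXPIRE_WINDOW)})
            self.expire_time = time.time() + EXPIRE_WINDOW

        def renew_lease(self):
            # Called in the background every RENEW_WINDOW seconds.
            try:
                self.conn.post_object(
                    LEASES_CONTAINER, self.owner_id,
                    headers={'X-Delete-After': str(EXPIRE_WINDOW)})
                self.expire_time = time.time() + EXPIRE_WINDOW
            except Exception:
                pass  # keep the old expire_time; the next renew may succeed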
The owner of a checkpoint periodically refreshes the expiry time of its lease
object key.

When the protection service crashes, the leases of its bank plugins will be
auto-deleted by the swift-object-expirer (one of the swift services).

When the GC comes to check whether a checkpoint is a zombie to be collected,
it first gets the owner of the checkpoint, and then checks whether the lease
of that owner exists.

If the lease exists, the 'protecting' checkpoints cannot be deleted by the GC;
otherwise the GC will clean them up.

Granularity
===========
To avoid flooding the bank server, we don't keep one lease per checkpoint.
Instead, we keep one lease per checkpoint owner, so the granularity of a lease
is one per bank plugin instance.

When a protection service instance gets initialized, each bank plugin instance
gets initialized as well. Each bank plugin then starts to maintain its own
lease with its corresponding bank server.

Here, every bank plugin plays the role of lease client, while the bank server
(the swift cluster) acts as the lease server.

Functions
=========
acquire_lease
-------------
Each bank plugin (lease client) uses this function to acquire a lease from the
bank server (lease server).

For swift specifically, it creates a lease object in a swift container and
sets an expire_window for this lease.

The expire_window represents the validity of this lease from creation
(or latest renew) until being auto-deleted by the swift server. The value of
expire_window should be configurable.

We use owner_id to identify an instance of the bank plugin. The owner_id is a
uuid created when the bank plugin instance is initialized, say, generated from
sha256 over the hostname and the timestamp at which the instance was
initialized.

The key of the lease object stored in swift looks like this:
/account/leases/owner_id.

In order to map a checkpoint to its owner, we create an index like this:
/account/checkpoints/checkpoint_id/owner when creating a checkpoint.

- create_owner_id: create a uuid to represent this bank plugin instance
- put_object: use swift-client to create a lease object in swift, and set
  'X-Delete-After' as: expire_window
- set_expire_time in memory on the lease client side: set the expire_time as:
  now + expire_window

renew_lease
-----------
This function is called by each lease client in the background periodically.

The renew_window represents the period with which the lease client refreshes
the lease. The renew_window is configurable as well, where
renew_window < expire_window.

If the lease client succeeds in renewing the lease, the lease gets a new
expire_window on the lease server from now on. The lease client side then
updates the expire_time in memory as: expire_time = now + expire_window.

If the lease client fails to renew, the lease object keeps the old
expire_window on the lease server side, and the lease client does not update
its expire_time in memory.

- post_object: use swift-client to reset the 'X-Delete-After' header as:
  expire_window
- update_expire_time: if post_object succeeds, update expire_time as:
  now + expire_window; otherwise, don't refresh the expire_time.

check_lease_validity
--------------------
This function is used by the checkpoint owner to check whether there is enough
time to execute an update operation on a checkpoint (or anything else guarded
by the lease) before the lease expires.

We use validity_window to represent the time window inside which an update
operation on a checkpoint should complete. This window is configurable and
should be estimated by the admin.

This function checks if validity_window <= expire_time - now. If true, it
returns true and thus allows the update operation to go ahead; otherwise, it
returns false and the update operation is aborted.

Even if the lease has not yet expired when validity_window > expire_time - now,
there might not be enough time to finish the update operation. If we allowed
the update operation to go ahead in this situation, there would be a risk that
the lease is recycled by the lease server while the operation is still
on-going.
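A sketch of this check under the same assumptions as above, where
``expire_time`` is the value maintained in memory by renew_lease:

.. code-block:: python

    import time

    VALIDITY_WINDOW = 60  # seconds; estimated by the admin


    def check_lease_validity(expire_time):
        # Allow the update only if it can complete before the lease
        # could possibly expire.
        return VALIDITY_WINDOW <= expire_time - time.time()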
-
-We use validity_window to represent the time window inside which an
-update operation on a checkpoint should complete. This window is
-configurable and should be estimated by the admin.
-
-The function checks whether validity_window <= expire_time - now. If
-that holds, it returns true and the update operation is allowed to go
-ahead; otherwise it returns false and the update operation aborts.
-
-Although the lease may not yet have expired when
-validity_window > expire_time - now, there might not be enough time left
-to finish the update operation. If we allowed the update operation to go
-ahead in this situation, there would be a risk that the lease server
-recycles the lease while the operation is still ongoing.
-
-check_lease_existence
----------------------
-This function is used by the GC to check whether the lease object exists
-on the lease server side.
-
-Specifically for checkpoints, the GC scans all checkpoints in
-'protecting' status. It first gets the owner of a checkpoint through its
-index, and then checks the existence of the owner's lease object on the
-lease server. If the lease object does not exist, it considers the
-checkpoint a zombie and goes ahead to recycle it. Otherwise, it skips
-the checkpoint and leaves it there.
-
-Configurations
-==============
-
-renew_window
-------------
-- represents the period at which the lease client renews the lease in
-  the background.
-
-expire_window
--------------
-- represents how long a lease lasts, from creation or the latest renewal
-  until it expires on the lease server side.
-- Note: expire_window > renew_window. To make the renewal mechanism more
-  robust, we recommend setting expire_window = N * renew_window. With
-  this setting, we tolerate (N-1) consecutive renewal failures, e.g.
-  caused by an unstable network or IO scheduling issues.
-
-validity_window
----------------
-- an optional configuration; the default value is derived from
-  renew_window, with validity_window <= renew_window.
-- the window estimated by the admin: how long one update operation will
-  take at most. The constraint here is: validity_window < expire_window.
-- Note: for the same reason as the renew_window setting, i.e. to
-  tolerate (N-1) renewal failures, we recommend setting
-  validity_window <= renew_window.
diff --git a/doc/source/specs/bank.md b/doc/source/specs/bank.md
deleted file mode 100644
index fe2b066f..00000000
--- a/doc/source/specs/bank.md
+++ /dev/null
@@ -1,154 +0,0 @@
-# Bank basics
-
-*** :exclamation: This is still a work in progress ***
-
-This document describes the layout and algorithms used by Karbor with the
-default bank implementation. Providers can use their own algorithms to
-manage metadata, but there might be issues when using the default
-plugins.
-
-## Abstract
-
-Since Karbor wants to be able to store metadata in many locations (Swift,
-MongoDB, etc.) we defined a simplified object store interface that we
-believe most backends will be able to support without much work.
-
-But the simplified interface doesn't describe how Karbor will perform its
-higher level operations, or how the higher level logic will be laid out
-in the object store. This is why we need the higher level logic defined
-explicitly, so that later we can use higher level bank functions knowing
-they are correct, safe and atomic.
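-
-As a rough sketch (the method names are illustrative, not a frozen API),
-the simplified object store interface amounts to little more than this:
-
-```python
-import abc
-
-
-class BankPlugin(abc.ABC):
-    """Minimal object-store contract a bank backend must fulfill."""
-
-    @abc.abstractmethod
-    def write_object(self, path, buffer):
-        """Create or overwrite the object at `path` atomically."""
-
-    @abc.abstractmethod
-    def get_object(self, path):
-        """Return the contents of the object at `path`."""
-
-    @abc.abstractmethod
-    def list_objects(self, prefix):
-        """List object paths under `prefix`, directory style."""
-
-    @abc.abstractmethod
-    def delete_object(self, path):
-        """Remove the object at `path`."""
-```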
-
-## Layout
-
-### Checkpoint directory
-
-`/checkpoints/<checkpoint-id>/index.json`
-
-#### Example content
-*times are in ISO 8601, UTC*
-```json
-{
-    "trigger": {},
-    "started_at": "2015-10-29T13:41:02Z",
-    "status": "in progress",
-    "plan": {},
-    "provider_id": "bc9f8572-6908-4353-aed5-2ba165c78aa6",
-    "provider_version": "1.2.0",
-    "plugins": {
-        "plugin_1": {
-            "name": "cinder volume",
-            "version": "1.2.3"
-        }
-    }
-}
-```
-
-### Protection definition directory
-
-`/checkpoints/<checkpoint-id>/<resource-id>/index.json`
-
-#### Example content
-
-```json
-{
-    "name": "vm",
-    "id": "8a562ed6-81ff-4bda-9672-2a8c49f130c3",
-    "dependent_resources": [
-        "92b022d9-cca4-4d02-b7fb-6cec9183d9f2",
-        "b081d472-023c-4a98-b57b-f2013996739b"
-    ]
-}
-```
-
-### Protection definition plugin data directory
-
-`/checkpoints/<checkpoint-id>/<resource-id>/plugin_data/*`
-
-## Checkpoint Creation Process
-
-Create a new checkpoint with id `<checkpoint-id>`:
-
-1. Acquire checkpoint lease
-    * action: acquire_lease
-    * id: `<checkpoint-id>`
-2. Create checkpoint pointer
-    * action: write_object
-    * path: `/indices/unfinished_checkpoints/<checkpoint-id>`,
-    * buffer: `<checkpoint-id>`
-3. Create checkpoint
-    * action: write_object
-    * path: `/checkpoints/<checkpoint-id>/index.json`,
-    * buffer:
-    ```json
-    {
-        "karbor_version": "1.0.0",
-        "status": "in_progress",
-        "plugins": {}
-    }
-    ```
-4. Run plugins
-5. Checkpoint finished but indices not yet created
-    * action: write_object
-    * path: `/checkpoints/<checkpoint-id>/index.json`,
-    * buffer:
-    ```json
-    {
-        "karbor_version": "1.0.0",
-        "status": "creating_indices",
-        "plugins": {}
-    }
-    ```
-6. Create index 'plan' (an example; there could be any number of indices)
-    * action: write_object
-    * path: `/indices/by_plan/<plan-id>/<checkpoint-id>`
-    * buffer: `<checkpoint-id>`
-7. Remove checkpoint pointer
-    * action: delete_object
-    * path: `/indices/unfinished_checkpoints/<checkpoint-id>`
-8. Release checkpoint lease
-    * action: release_lease
-    * id: `<checkpoint-id>`
-
-## Delete Checkpoint
-
-1. Create checkpoint pointer
-    * action: write_object
-    * path: `/indices/deleted_checkpoints/<checkpoint-id>`,
-    * buffer: `<checkpoint-id>`
-2. Mark transaction as being deleted
-    * action: write_object
-    * path: `/checkpoints/<checkpoint-id>/index.json`,
-    * buffer:
-    ```json
-    {
-        "karbor_version": "1.0.0",
-        "status": "deleting",
-        "plugins": {}
-    }
-    ```
-
-From this point on the checkpoint is considered deleted and should not be
-used or returned by the provider.
-
-## GC
-
-When deleting a checkpoint, the checkpoint is only marked as deleted. One
-of the Karbor servers will have to run a GC pass and make sure all the
-actual data is freed. This is done to unify all the cleanup into one flow
-and to make sure the deletion has propagated to all sites before actually
-deleting the data.
-
-
-For each checkpoint in `/indices/deleted_checkpoints`:
-
-1. Remove indices
-    - Remove index 'plan' (an example; there could be any number of
-      indices)
-        * action: delete_object
-        * path: `/indices/by_plan/<plan-id>/<checkpoint-id>`
-2. Run plugins
-3. Delete checkpoint file
-    * action: delete_object
-    * path: `/checkpoints/<checkpoint-id>/index.json`,
-4. Remove checkpoint pointer
-    * action: delete_object
-    * path: `/indices/unfinished_checkpoints/<checkpoint-id>`
-5. Delete checkpoint deletion marker
-    * action: delete_object
-    * path: `/indices/deleted_checkpoints/<checkpoint-id>`
diff --git a/doc/source/specs/checkpoint-copy-api.rst b/doc/source/specs/checkpoint-copy-api.rst
deleted file mode 100644
index 5e219e1a..00000000
--- a/doc/source/specs/checkpoint-copy-api.rst
+++ /dev/null
@@ -1,187 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-==================================================
-Add checkpoint copy API about protection to Karbor
-==================================================
-
-https://blueprints.launchpad.net/karbor/+spec/support-copy-the-checkpoint-api
-
-Problem description
-===================
-
-Use case: Karbor + backup software plugins + storage back-ends
-
-Backup software products support a variety of storage back-ends. These
-back-ends may be traditional storage arrays or object storage (Swift/S3).
-The backup software manages the storage back-ends and chooses which one
-to use.
-
-To protect the backup data of OpenStack resources against loss, the
-backup software deploys several storage back-ends in different
-Availability Zones or Regions of one cloud environment. By default, the
-resources in a plan are backed up when the user creates a checkpoint in
-Karbor, and the backup data of these resources for that checkpoint is
-stored in the backup software's main storage back-end. The cloud provider
-wants to expose the copy capability of the backup software, so that
-resource backup data can be copied from the main storage back-end to
-another back-end in a different AZ or region. Even if the backup data in
-the main storage back-end is damaged for some reason, the resources in
-the plan can still be restored from another storage back-end of the
-backup software.
-
-
-Use Cases
-=========
-
-Users want to copy the backups in a checkpoint from one storage back-end
-of the backup software to another via a new RESTful API, before restoring
-resources from that checkpoint.
-Backup software vendors also need Karbor protection plugins to support a
-copy operation on resource backup data, so that they can expose backup
-data copying to users through the Karbor protection service.
-
-
-Proposed change
-===============
-1. Add the copy API controller for the Karbor API.
-   Implement the 'create' method of the copy API controller.
-   In this API controller, all the uncopied checkpoints created from the
-   plan will be copied.
-
-2. The copy statuses of checkpoint resources:
-   CHECKPOINT_STATUS_WAIT_COPYING = 'wait_copying'
-   CHECKPOINT_STATUS_COPYING = 'copying'
-   CHECKPOINT_STATUS_COPY_FINISHED = 'finished'
-
-3. Add a new copy operation for protection plugins.
-
-   Add a new CopyOperation for protection plugins. The copy operation is
-   optional for a plugin; in most cases backup software plugins can
-   implement one.
-   For example, with the CopyOperation of a backup software volume
-   protection plugin, the backup data can be copied from the main storage
-   back-end to another back-end in a different AZ or region.
-
-
-4. Add an operation_log for the copy API.
-   Add a new copy flow in the protection service of Karbor.
-   If the CopyOperation for the checkpoint does not run successfully in
-   the copy flow, the status of the operation_log object is set to
-   'error' in the 'revert' method of InitiateCopyTask. Otherwise the
-   status of the operation_log object is set to 'success' in
-   CompleteCopyTask.
-
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-1. Create copy API
-The request JSON when creating a copy::
-
-  **post** : /v1/{project_id}/providers/{provider_id}/checkpoints/action
-  ```json
-  {
-    "copy": [
-      {
-        "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",
-        "plan_id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb",
-        "parameters": {
-          "OS::Cinder::Volume": {
-          },
-          "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": {
-          }
-        }
-      }
-    ]
-  }
-
-
-The response JSON when creating a copy::
-
-  ```json
-  {
-    "copy": {
-      "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7",
-      "project_id": "e486a2f49695423ca9c47e589b948108",
-      "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",
-      "plan_id": "3523a271-68aa-42f5-b9ba-56e5200a2ebb",
-      "parameters": {
-        "OS::Cinder::Volume": {
-        },
-        "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": {
-        }
-      },
-      "status": "error"
-    }
-  }
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Add a new RESTful API for copy
-* Add copy support to the karbor client
-
-Dependencies
-============
-
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-None
-
-References
-==========
-None
diff --git a/doc/source/specs/checkpoint-metadata.rst b/doc/source/specs/checkpoint-metadata.rst
deleted file mode 100644
index 2ea083ed..00000000
--- a/doc/source/specs/checkpoint-metadata.rst
+++ /dev/null
@@ -1,142 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-===================================
-API for custom checkpoint meta-data
-===================================
-
-https://blueprints.launchpad.net/cinder/+spec/custom-checkpoint-metadata
-
-Problem description
-===================
-
-Currently, when creating a checkpoint, the only place to add custom
-information apart from the name is the description field. This means that
-tools that create checkpoints need to use this user-visible space for
-their own metadata.
-
-User data can only be set during creation and is read-only from that
-point on.
-
-Use Cases
-=========
-
-A tool creates a checkpoint and wants to add information related to the
-tool's function. For example, a tool that creates checkpoints in response
-to specific external events would like to record the event_id of the
-event that triggered the checkpoint creation.
-
-Proposed change
-===============
-
-When creating a checkpoint a new field would be available called
-``extra-info``.
-This field must be a map in the format of::
-
-    {
-        "key": "value",
-    }
-
-Keys and values *must* both be strings. Keys that are officially
-recognized will be in the format of ``karbor-<key-name>``, for example
-``karbor-created-by``.
-
-Anything that is not officially defined *should* use the
-prefix: ``x---``.
-For example, ``x-trigger-master--trigger-id``
-
-Alternatives
-------------
-
-Do nothing, this is not a mission critical feature.
-
-Data model impact
------------------
-
-New field for a checkpoint called ``extra-info``.
-
-REST API impact
----------------
-
-New optional body attribute when creating a new checkpoint::
-
-    POST /v1/{tenant_id}/checkpoint
-
-    {
-       ...
-       "extra-info": {
-           "karbor-created-by": "operation-engine"
-       }
-    }
-
-Security impact
----------------
-
-We need to make sure the number of entries and their size are within
-bounds to prevent any attacks.
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-The new API will be exposed to users via the python-karborclient.
-
-Performance Impact
-------------------
-
-Filtering the results might cause a slight performance impact for the
-REST API.
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Write API
-* Add to Karbor client
-* Write tests
-* Add documentation
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor and the python-karborclient.
-
-
-Documentation Impact
-====================
-
-New docs to explain how to use the API.
-
-
-References
-==========
-
-None
diff --git a/doc/source/specs/checkpoint-scheduled-clean.rst b/doc/source/specs/checkpoint-scheduled-clean.rst
deleted file mode 100644
index 8c2f96ad..00000000
--- a/doc/source/specs/checkpoint-scheduled-clean.rst
+++ /dev/null
@@ -1,194 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-==========================
-Checkpoint scheduled clean
-==========================
-
-https://blueprints.launchpad.net/karbor/+spec/checkpoint-scheduled-clean
-
-Problem description
-===================
-
-Karbor provides the Operation Engine service to support scheduled
-operations for a protection plan. Scheduled operations create many
-checkpoints, following the rules their triggers define; checkpoints may
-be created every day, every week or every month. Currently Karbor has no
-automatic cleanup feature or policy for the end user.
-
-Use Cases
-=========
-
-As more and more checkpoints are created in the Bank, end users need more
-and more storage capacity, while some checkpoints are no longer
-meaningful or necessary to them. A scheduled checkpoint cleanup feature
-is needed to satisfy this requirement.
-
-Proposed change
-===============
-
-Karbor provides end users with settings for the scheduled operation,
-including ``max_backups`` and ``retention_duration``. Karbor can then
-automatically clean up obsolete checkpoints created by the scheduled
-operation, as the end user defines.
-
-#. **max_backups**: the maximum number of checkpoints created by the
-   scheduled operation, e.g. 10.
-#. **retention_duration**: the retention time of checkpoints created by
-   the scheduled operation, e.g. 20 weeks.
-
-Karbor provides default values for these two settings. The default value
-is -1, which means Karbor will not clean checkpoints by default. When end
-users launch a scheduled operation, they can supply values for these two
-settings and invoke the scheduled operation RESTful API.
-
-Meanwhile, a database table called **checkpoint_records** will be created
-to record the checkpoints created by **scheduled protect** or **protect
-now**. This table keeps a real-time mapping to the checkpoints in the
-Bank.
-
-+--------------------+--------------+------+-----+---------+----------------+
-| Field              | Type         | Null | Key | Default | Extra          |
-+====================+==============+======+=====+=========+================+
-| created_at         | datetime     | YES  |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| updated_at         | datetime     | YES  |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| deleted_at         | datetime     | YES  |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| deleted            | tinyint(1)   | NO   |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| id                 | uuid         | NO   | PRI | NULL    | auto_increment |
-+--------------------+--------------+------+-----+---------+----------------+
-| project_id         | varchar(36)  | NO   |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| checkpoint_id      | varchar(36)  | NO   |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| checkpoint_status  | varchar(36)  | NO   |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| provider_id        | varchar(36)  | NO   |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| plan_id            | varchar(36)  | NO   |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| operation_id       | varchar(36)  | YES  |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| create_by          | varchar(36)  | YES  |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| extend_info        | Text         | YES  |     | NULL    | detail info    |
-+--------------------+--------------+------+-----+---------+----------------+
-
-When the Operation Engine service triggers an operation, the operation
-invokes the REST API to create a checkpoint; the API service launches a
-checkpoint creation workflow and saves the checkpoint information into
-the **checkpoint_records** database table. If an end user deletes a
-checkpoint, the API service launches a checkpoint deletion workflow and
-deletes the checkpoint's record from the database.
-
-When the operation is triggered in the Operation Engine service, it reads
-the values of ``max_backups`` and ``retention_duration`` from the
-scheduled operation and evaluates them to decide whether to invoke the
-REST API to clean up obsolete checkpoints.
-
-Alternatives
-------------
-
-Do nothing, this is not a mission critical feature.
-
-Data model impact
------------------
-
-Add two optional key-value pairs, ``max_backups`` and
-``retention_duration``, to the operation_definition of the scheduled
-protect operation.
-
-REST API impact
----------------
-
-New optional body attributes when creating a scheduled operation::
-
-  **POST** : /v1/{project_id}/scheduled_operations
-  ```json
-  {
-    ...
-    "operation_definition": {
-      ...
-      "max_backups": 10,
-      "retention_duration": 20
-    }
-  }
-  ```
-
-Security impact
----------------
-
-We need to make sure the two values are within bounds to prevent any
-attacks. The bounds will be defined in the local config.
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-The new API will be exposed to users via the python-karborclient.
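-
-For illustration, a scheduled operation using the two new settings might
-be created from python-karborclient roughly as follows; the client
-construction and the exact manager signature are assumptions, not part
-of this spec::
-
-    from karborclient import client
-
-    # 'sess' is an authenticated keystoneauth1 session created elsewhere.
-    karbor = client.Client('1', session=sess)
-
-    karbor.scheduled_operations.create(
-        name='weekly-protect',
-        operation_type='protect',
-        trigger_id='2a9ce1f3-cc1a-4516-9435-0ebb13caa398',
-        operation_definition={
-            'plan_id': '3523a271-68aa-42f5-b9ba-56e5200a2ebb',
-            'provider_id': 'cf56bd3e-97a7-4078-b6d5-f36246333fd9',
-            'max_backups': 10,         # keep at most 10 checkpoints
-            'retention_duration': 20,  # and none older than 20 weeks
-        })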
-
-Performance Impact
-------------------
-
-Filtering the results might cause a slight performance impact for the
-REST API.
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Write API
-* Add to Karbor client
-* Write tests
-* Add documentation
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor and the python-karborclient.
-
-
-Documentation Impact
-====================
-
-New docs to explain how to use the API.
-
-
-References
-==========
-
-None
diff --git a/doc/source/specs/checkpoint-tenant-isolation.rst b/doc/source/specs/checkpoint-tenant-isolation.rst
deleted file mode 100644
index d38a3419..00000000
--- a/doc/source/specs/checkpoint-tenant-isolation.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-==================================================
-Multi-tenant Isolation in Managing the Checkpoints
-==================================================
-
-https://blueprints.launchpad.net/karbor/+spec/checkpoint-tenant-isolation
-
-Problem description
-===================
-
-In a multi-tenant scenario, when a user lists the checkpoints they
-created, all the checkpoints in the bank are returned. This is
-problematic, as there is nothing stopping one project from restoring or
-deleting another's data. It means that users can use the update/restore
-mechanism to bypass other security in OpenStack.
-
-Use Cases
----------
-
-1. Provide a way for end users to list only the checkpoints they created
-themselves.
-2. In a cross-site scenario, users can only back up and restore between
-two sites that have the same project ids.
-3. Admins can query all the checkpoints with the '--all-projects'
-parameter.
-
-
-Proposed Change
-===============
-
-Each project can see only the checkpoints it created itself. Admins can
-see all the checkpoints in the bank.
-
-Data model impact
------------------
-Adding `project_id` to the data path of the checkpoint 'indices' in the
-bank.
-
-For example:
-/checkpoints/f7702b65-6abe-4302-9542-4fb511ce5e14/ <- directory
-/indices/by-date/2017-09-20/016fa93a9b204c49a12425574bdc5f4e/ <- by date
-/indices/by-plan/08a5a407-6252-4514-9159-5f554af2acd0/016fa93a9b204c49a12425574bdc5f4e/ <- by plan
-/indices/by-provider/cf56bd3e-97a7-4078-b6d5-f36246333fd9/016fa93a9b204c49a12425574bdc5f4e/ <- by provider
-
-'016fa93a9b204c49a12425574bdc5f4e' is a project id.
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Dependencies
-============
-
-None
-
-Testing
-=======
-
-None
-
-Documentation Impact
-====================
-
-None
-
-References
-==========
-
-None
\ No newline at end of file
diff --git a/doc/source/specs/checkpoint-verify-api.rst b/doc/source/specs/checkpoint-verify-api.rst
deleted file mode 100644
index f58b7c99..00000000
--- a/doc/source/specs/checkpoint-verify-api.rst
+++ /dev/null
@@ -1,288 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-==========================================================
-Add checkpoint verification API about protection to Karbor
-==========================================================
-
-https://blueprints.launchpad.net/karbor/+spec/support-verify-the-checkpoint-api
-
-Problem description
-===================
-
-Users want to verify the backups in a checkpoint before restoring
-resources from it. If verification of the backups in a checkpoint fails,
-the backup data is corrupted and invalid for recovery, and the user
-cannot restore resources from that checkpoint's backup data.
-Backup software generally supports verifying backup data copies [1] [2].
-The verification work can be done in the verification operation of the
-vendors' protection plugins.
-
-The verification operation is optional for a plugin, though most plugins
-should implement one.
-For example, in the Cinder volume backup plugin, the verification
-operation can check that the backup is still in place and inspect the
-status of the backup.
-Some plugins can and should also make sure metadata is accessible from
-the bank in their verification operation.
-
-
-Use Cases
-=========
-
-Users want to verify the backups in a checkpoint via a new RESTful API
-before restoring resources from that checkpoint.
-Backup software vendors also need Karbor protection plugins to support a
-verification operation, so that they can expose backup data verification
-to users through the Karbor protection service.
-
-
-Proposed change
-===============
-1. Add the verification API controller for the Karbor API.
-   Implement the 'create' method of the verification API controller.
-   Implement the 'show' method of the verification API controller.
-   Implement the 'index' method of the verification API controller.
-
-2. The statuses of verification resources:
-   VERIFICATION_STATUS_VERIFYING = 'verifying'
-   VERIFICATION_STATUS_SUCCESS = 'success'
-   VERIFICATION_STATUS_ERROR = 'error'
-
-3. Add a new verification operation for protection plugins.
-
-   Add a new VerificationOperation for protection plugins. The
-   verification operation is optional for a plugin, though most plugins
-   should implement one.
-   For example, in the VerificationOperation of the image protection
-   plugin, the backup data in the Swift bank can be verified by checking
-   the ETags of the objects in Swift.
-
-   For the VerificationOperation of the Cinder protection plugin: the
-   default Cinder volume plugin cannot verify volume backup data itself,
-   since Cinder does not expose an API for backup data verification.
-   Instead, we can check that the backup is still in place and inspect
-   the status of the backup resources in Cinder.
-   Some plugins can and should also make sure metadata is accessible
-   from the bank in their verification operation.
-
-
-4. Add an operation_log for the verification API.
-   Add a new verification flow in the protection service of Karbor.
-   If the VerificationOperation for the checkpoint does not run
-   successfully in the verification flow, the status of the
-   operation_log object is set to 'error' in the 'revert' method of
-   InitiateVerificationTask. Otherwise the status of the operation_log
-   object is set to 'success' in CompleteVerificationTask.
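-
-As a hedged sketch of what such a VerificationOperation could look like
-for the Cinder plugin (the hook name follows Karbor's protection plugin
-Operation class; the helper methods are hypothetical)::
-
-    from karbor.services.protection import protection_plugin
-
-
-    class VerificationOperation(protection_plugin.Operation):
-        def on_main(self, checkpoint, resource, context, parameters,
-                    **kwargs):
-            # Look up the backup id this checkpoint recorded for the
-            # volume (hypothetical helper), then ask Cinder whether the
-            # backup still exists and is usable.
-            backup_id = self._get_backup_id(checkpoint, resource)
-            cinder = self._cinder_client(context)  # hypothetical helper
-            backup = cinder.backups.get(backup_id)
-            if backup.status != 'available':
-                raise Exception('backup %s is not available' % backup_id)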
- - -Alternatives ------------- - -None - -Data model impact ------------------ - -+-------------------------+--------------+------+-----+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------------------------+--------------+------+-----+---------+-------+ -| id | varchar(36) | NO | PRI | NULL | | -+-------------------------+--------------+------+-----+---------+-------+ -| project_id | varchar(255) | NO | | NULL | | -+-------------------------+--------------+------+-----+---------+-------+ -| provider_id | varchar(36) | NO | | NULL | | -+-------------------------+--------------+------+-----+---------+-------+ -| checkpoint_id | varchar(36) | NO | | NULL | | -+-------------------------+--------------+------+-----+---------+-------+ -| status | varchar(64) | YES | | NULL | | -+-------------------------+--------------+------+-----+---------+-------+ -| parameters | Text | YES | | NULL | | -+-------------------------+--------------+------+-----+---------+-------+ -| resources_status | Text | YES | | NULL | | -+-------------------------+--------------+------+-----+---------+-------+ -| resources_reason | Text | YES | | NULL | | -+-------------------------+--------------+------+-----+---------+-------+ -| created_at | Datetime | YES | | NULL | | -+-------------------------+--------------+------+-----+---------+-------+ -| updated_at | Datetime | YES | | NULL | | -+-------------------------+--------------+------+-----+---------+-------+ -| deleted_at | Datetime | YES | | NULL | | -+-------------------------+--------------+------+-----+---------+-------+ - -REST API impact ---------------- - -1. Create verification API -The request JSON when creating a verification:: - - **post** : /v1/{project_id}/verifications - ```json - { - "verification": [ - { - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "parameters": { - "OS::Cinder::Volume": { - }, - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": { - } - } - } - ] - } - - -The response JSON when Creating a verification:: - - ```json - { - "verification":{ - "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7", - "project_id": "e486a2f49695423ca9c47e589b948108", - "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "parameters": { - "OS::Cinder::Volume": { - }, - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": { - } - }, - "resource_status": { - "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": "verifying", - "OS::Cinder::Volume#98eb847f-9f59-4d54-8b7b-5047bd2fa4c7": "error" - }, - "resource_reason": { - "OS::Cinder::Volume#98eb847f-9f59-4d54-8b7b-5047bd2fa4c7": "Backup not found" - }, - "status": "error" - } - } - - - -2. 
List verifications API
-The response JSON when listing verifications::
-
-  **get** : /v1/{project_id}/verifications
-  ```json
-  {
-    "verifications": [
-      {
-        "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7",
-        "project_id": "e486a2f49695423ca9c47e589b948108",
-        "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",
-        "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3",
-        "parameters": {
-          "OS::Cinder::Volume": {
-          },
-          "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": {
-          }
-        },
-        "resource_status": {
-          "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": "verifying",
-          "OS::Cinder::Volume#98eb847f-9f59-4d54-8b7b-5047bd2fa4c7": "error"
-        },
-        "resource_reason": {
-          "OS::Cinder::Volume#98eb847f-9f59-4d54-8b7b-5047bd2fa4c7": "Backup not found"
-        },
-        "status": "error"
-      }
-    ]
-  }
-
-
-3. Show verification API
-The response JSON when showing a verification::
-
-  **get** : /v1/{project_id}/verifications/{verification_id}
-  ```json
-  {
-    "verification": {
-      "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7",
-      "project_id": "e486a2f49695423ca9c47e589b948108",
-      "provider_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",
-      "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3",
-      "parameters": {
-        "OS::Cinder::Volume": {
-        },
-        "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": {
-        }
-      },
-      "resource_status": {
-        "OS::Nova::Server#3f8af6c6-ecea-42bd-b44c-724785bbe5ea": "verifying",
-        "OS::Cinder::Volume#98eb847f-9f59-4d54-8b7b-5047bd2fa4c7": "error"
-      },
-      "resource_reason": {
-        "OS::Cinder::Volume#98eb847f-9f59-4d54-8b7b-5047bd2fa4c7": "Backup not found"
-      },
-      "status": "error"
-    }
-  }
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Add a new RESTful API for verification
-* Add the database data module for verification
-* Add verification support to the karbor client
-
-Dependencies
-============
-
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-None
-
-References
-==========
-
-[1] http://documentation.commvault.com/commvault/v10/article?p=features/data_verification/data_verification.htm
-
-[2] https://www.veritas.com/content/support/en_US/doc-viewer.123533878-127136857-0.v123545982-127136857.html
-
diff --git a/doc/source/specs/cinder-volume-snapshot-plugin.rst b/doc/source/specs/cinder-volume-snapshot-plugin.rst
deleted file mode 100644
index 99d7076e..00000000
--- a/doc/source/specs/cinder-volume-snapshot-plugin.rst
+++ /dev/null
@@ -1,184 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-========================================
-Cinder volume snapshot protection plugin
-========================================
-
-https://blueprints.launchpad.net/karbor/+spec/cinder-volume-snapshot-plugin
-
-Problem description
-===================
-
-Karbor currently has a default Cinder volume protection plugin
-implemented using the Cinder volume backup feature. A new Cinder
-protection plugin will be introduced to Karbor; it will use Cinder's
-snapshot feature to protect volumes.
-
-
-Use Cases
-=========
-
-A user creates a volume in Cinder and mounts it to a server.
-The user then stores a large amount of data on the volume. To avoid
-losing this data, the user wants to protect the volume by taking
-periodic snapshots of it.
-If the user wants to restore the volume, they can create a new volume
-from such a snapshot.
-
-Proposed change
-===============
-
-Cinder volume snapshot protection plugin:
------------------------------------------
-A new snapshot protection plugin for Cinder volumes needs to be
-implemented.
-
-1. Protect Operation:
-The 'create' method of cinderclient's SnapshotManager will be called in
-the main hook of this operation to take a snapshot of the volume. A
-snapshot of the resource volume will be created.
-
-2. Restore Operation:
-The 'create' method of cinderclient's VolumeManager will be called in
-the main hook of this operation to create a new volume from the given
-snapshot.
-A new volume will be created from the snapshot.
-
-3. Delete Operation:
-The volume snapshot will be deleted.
-The 'delete' method of cinderclient's SnapshotManager will be called in
-the main hook of this operation to delete the volume snapshot.
-
-Cinder volume snapshot protection plugin schema:
-------------------------------------------------
-
-::
-
-    OPTIONS_SCHEMA = {
-        "title": "Volume Snapshot Protection Options",
-        "type": "object",
-        "properties": {
-            "snapshot_name": {
-                "type": "string",
-                "title": "Snapshot Name",
-                "description": "The name of the volume snapshot."
-            },
-            "description": {
-                "type": "string",
-                "title": "Description",
-                "description": "The description of the volume snapshot."
-            },
-            "force": {
-                "type": "boolean",
-                "title": "Force",
-                "description": "If force is True, create a snapshot even if the volume is attached to an instance.",
-                "default": False
-            }
-        },
-        "required": ["snapshot_name", "description", "force"]
-    }
-
-    RESTORE_SCHEMA = {
-        "title": "Volume Protection Restore",
-        "type": "object",
-        "properties": {
-            "restore_name": {
-                "type": "string",
-                "title": "Restore Name",
-                "description": "The name of the restored volume.",
-                "default": None
-            },
-            "restore_description": {
-                "type": "string",
-                "title": "Restore Description",
-                "description": "The description of the restored volume.",
-                "default": None
-            }
-        }
-    }
-
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-Add this volume snapshot plugin to the entry_points section of setup.cfg.
-Add this volume snapshot plugin's configuration to the provider file.
-
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Write the volume snapshot protection plugin
-* Write tests
-* Add a usage example about volume snapshot protection
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-Add a usage example about volume snapshot protection.
-
-
-References
-==========
-
-None
diff --git a/doc/source/specs/file-system-bank.rst b/doc/source/specs/file-system-bank.rst
deleted file mode 100644
index ba45d10f..00000000
--- a/doc/source/specs/file-system-bank.rst
+++ /dev/null
@@ -1,144 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-=====================================
-File System based Bank implementation
-=====================================
-
-https://blueprints.launchpad.net/karbor/+spec/file-system-bank
-
-Problem description
-===================
-
-Currently we only support Swift as a bank implementation. This means that
-anyone who uses Karbor must also install Swift, which might be
-unacceptable or overly complicated for some deployments. Furthermore,
-having many options for bank backends is always a good thing.
-
-We suggest adding an FS based implementation. It will use files to store
-objects and object metadata.
-
-Use Cases
-=========
-
-As explained, deployers might not want, or might be unable, to install
-Swift in their cloud.
-
-Proposed change
-===============
-
-Objects are stored under file names derived from their IDs, with `/`
-defined as the directory separator.
-
-For example::
-    Object ID: /checkpoints/2fd14f87-46bd-43a9-8853-9e1a84ebee3d/index.json
-
-Since object names might contain characters that are not allowed in
-regular file names, we need to escape some characters so that the names
-can be used as file names.
-
-We propose the following escape sequence: non-ASCII and special
-characters are rewritten as `%[XX..]`, where XX are hex representations
-of the UTF-8 encoding of the characters.
-
-This avoids using the backslash for escaping.
-
-
-Example::
-    object*with%wierd*id
-    =>
-    object%[2A]with%[25]wierd%[2A]id
-
-The metadata files will be in JSON format. The names and format of these
-files are the same as the metadata objects in the Swift bank.
-
-For example::
-    /checkpoints/3a4d76e7-f8d8-4f2f-9c1d-107d88d7a815/ <- directory
-    /checkpoints/3a4d76e7-f8d8-4f2f-9c1d-107d88d7a815/metadata <- md file
-    /checkpoints/3a4d76e7-f8d8-4f2f-9c1d-107d88d7a815/status
-
-
-Alternatives
-------------
-
-Do nothing, this is not a mission critical feature.
-
-Data model impact
------------------
-
-None.
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-This backend might be faster or slower than Swift, depending on the use
-case.
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Write the Bank Plugin
-* Add documentation
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-New docs to explain how to use and configure the alternative Bank
-implementation.
-
-
-References
-==========
-
-None
diff --git a/doc/source/specs/freezer-protection-plugin.rst b/doc/source/specs/freezer-protection-plugin.rst
deleted file mode 100644
index 4eb5b562..00000000
--- a/doc/source/specs/freezer-protection-plugin.rst
+++ /dev/null
@@ -1,195 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-=========================
-Freezer protection plugin
-=========================
-
-https://blueprints.launchpad.net/karbor/+spec/freezer-protection-plugin
-
-Problem description
-===================
-
-Currently, Karbor provides some default protection plugins to protect
-Nova instances, Cinder volumes, etc.
-We should support more protection plugins, so that users' needs can be
-satisfied more flexibly in different scenarios.
-
-Freezer is a distributed backup, restore and disaster recovery as a
-service platform. It currently supports backing up Nova instances and
-Cinder volumes managed by an OpenStack environment. Freezer can also
-back up resources to storage media outside the OpenStack environment, or
-to several different storage media within one backup. It is therefore
-useful to introduce Freezer as a protection plugin for Karbor; with
-Freezer, we have more strategies for protecting our cloud resources.
-
-Use Cases
-=========
-
-Users who want to use Freezer as a backup and restore service for their
-OpenStack environment.
-
-Proposed change
-===============
-
-Freezer protection plugin
--------------------------
-
-A Freezer protection plugin that supports the project, server, volume,
-image and network resource types needs to be implemented.
-
-For a project, the Freezer plugin will back up all of its resources (the
-tenant backup implemented in Freezer).
-
-For a server, the Freezer plugin will back up a specific Nova instance
-(if the instance was image-booted, Freezer will back up the image and
-network; if the instance was volume-booted, Freezer will back up the
-system volume).
-
-For a volume, the Freezer plugin will back up a specific Cinder volume.
-
-For an image or a network, the Freezer plugin will do nothing, as
-Freezer does not support backing up these two resource types
-independently.
-
-A new protection plugin for Freezer needs to be implemented.
-
-1. Protect Operation:
-
-   In the main hook of this operation, the Freezer client is called to
-   create a Freezer job containing backup actions, which perform the
-   backup of the protectable resources.
-   After the backup, the Freezer job and its actions are deleted.
-
-2. Restore Operation:
-
-   In the main hook of this operation, the Freezer client is called to
-   create a Freezer job containing restore actions, which perform the
-   restore of the protectable resources.
-   After the restore, the Freezer job and its actions are deleted.
-
-3. Delete Operation:
-
-   In the main hook of this operation, the Freezer client is called to
-   create a Freezer job containing admin actions, which delete the
-   backup of the resources.
-   After the deletion, the Freezer job and its actions are deleted.
-
-Freezer protection plugin schema:
----------------------------------
-
-::
-
-    OPTIONS_SCHEMA = {
-        "title": "Freezer Protection Options",
-        "type": "object",
-        "properties": {
-            "backup_name": {
-                "type": "string",
-                "title": "Backup Name",
-                "description": "The name of the backup.",
-                "default": None
-            },
-            "description": {
-                "type": "string",
-                "title": "Description",
-                "description": "The description of the backup."
-            }
-        },
-        "required": ["backup_name"]
-    }
-
-    RESTORE_SCHEMA = {
-        "title": "Freezer Protection Restore",
-        "type": "object",
-        "properties": {
-            "restore_name": {
-                "type": "string",
-                "title": "Restore Resource Name",
-                "description": "The name of the restored resource.",
-                "default": None
-            },
-        },
-        "required": ["restore_name"]
-    }
-
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-Add the freezer protection plugin endpoint to setup.cfg.
-Add the freezer protection plugin configuration to the provider file.
-
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-Pengju Jiao <jiaopengju@cmss.chinamobile.com>
-
-Work Items
-----------
-
-* Write the freezer protection plugin
-* Write tests
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-None
-
-References
-==========
-
-None
diff --git a/doc/source/specs/image-boot-server-backup-with-data.rst b/doc/source/specs/image-boot-server-backup-with-data.rst
deleted file mode 100644
index 3c49ee0e..00000000
--- a/doc/source/specs/image-boot-server-backup-with-data.rst
+++ /dev/null
@@ -1,135 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-==================================
-Image boot server backup with data
-==================================
-
-https://storyboard.openstack.org/#!/story/1712059
-
-Problem description
-===================
-
-Currently, we only protect the original image when protecting
-image-booted servers. "Original image" means that data generated by the
-users after the server was created is not included. So when restoring
-from the backup, a new server with the same original image is created,
-but the users' data is not included. In some of our production
-environments we use image-booted instances, for example with a
-distributed file system or DRBD as the backend. So in our opinion this
-is not suitable, and we should protect the users' data as well.
-
-We suggest adding an 'image-create' step before the image backup, and
-using the newly created image id as the backup image id when the image
-backup is a child resource of a server backup.
-
-Use Cases
-=========
-
-As explained, users may use a distributed file system or DRBD as their
-backend for booting servers.
-
-Proposed change
-===============
-
-Image Protectable Plugin:
-When returning a protectable instance, a new field is added to
-extra-info. This field must be in the format of::
-
-    {
-        "server_id": "value1",
-    }
-
-This tells the image protection plugin the id of the parent server, so
-the plugin can treat the image specially.
-
-Image Protection Plugin:
-Add a new configuration option named 'enable_server_snapshot' that
-enables creating a snapshot of the server when the resource contains
-extra_info and the server_id in extra_info is valid. The newly created
-image id then replaces the resource id for the final backup (see the
-sketch after the Work Items below).
-
-Alternatives
-------------
-
-Do nothing, this is not a mission critical feature.
-
-Data model impact
------------------
-
-None.
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-The image protection plugin may become slower because of the newly added
-step.
-
-Other deployer impact
----------------------
-
-The default value of 'enable_server_snapshot' is True; users can set it
-to False to disable this feature.
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-Pengju Jiao (jiaopengju@cmss.chinamobile.com)
-
-Work Items
-----------
-
-* Add extra_info when listing instances.
-* Update the image protection plugin to support snapshotting during
-  protection.
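-
-For illustration, the 'image-create' step proposed above could look
-roughly like this; the function name is hypothetical, while
-``servers.create_image`` is the novaclient call that actually takes the
-snapshot::
-
-    def _snapshot_boot_image(nova_client, server_id, checkpoint_id):
-        # Snapshot the running server so user data written after boot
-        # is captured, then back up the new image instead of the
-        # original one.
-        snapshot_image_id = nova_client.servers.create_image(
-            server_id, 'karbor-checkpoint-%s' % checkpoint_id)
-        return snapshot_image_id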
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-New docs to explain how to use and configure the newly added options.
-
-
-References
-==========
-
-None
diff --git a/doc/source/specs/index.rst b/doc/source/specs/index.rst
deleted file mode 100644
index 09d740cc..00000000
--- a/doc/source/specs/index.rst
+++ /dev/null
@@ -1,60 +0,0 @@
-Specs
-=====
-
-This section contains detailed specification documents for
-different features inside Karbor.
-
-Approved Specs
---------------
-
-.. toctree::
-   :maxdepth: 1
-
-   add-db-manage-purge
-   api-service
-   bank-plugin-lease
-   checkpoint-metadata
-   checkpoint-scheduled-clean
-   cinder-volume-snapshot-plugin
-   file-system-bank
-   instances-extra-info
-   instances-parameters
-   manila-share-snapshot-plugins
-   operation-engine/operation_engine_design
-   pluggable_protection_provider
-   protection-service/protection-service
-   protection-service/restore-design-spec
-   refactor_clients
-   remove_heat
-   restore-resource-status
-   s3-bank
-   trove-database-backup-plugins
-   operation-log-api
-   policy-in-code
-   freezer-protection-plugin
-   kubernetes-pods-protection-plugin
-   checkpoint-verify-api
-   volume-glance-protection-plugin
-   checkpoint-tenant-isolation
-   service-management-api
-   adding-more-protection-parameters-retention-period
-   checkpoint-copy-api
-   api-json-schema-validation
-   quotas
-   image-boot-server-backup-with-data
-
-Spec Template
--------------
-.. toctree::
-   :maxdepth: 1
-
-   skeleton
-   template
-
-
-Indices and tables
-------------------
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
diff --git a/doc/source/specs/instances-extra-info.rst b/doc/source/specs/instances-extra-info.rst
deleted file mode 100644
index 2b7a6dd8..00000000
--- a/doc/source/specs/instances-extra-info.rst
+++ /dev/null
@@ -1,210 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-===================================================================
-Add extra_info field for the response of protectable instances API
-===================================================================
-
-https://blueprints.launchpad.net/karbor/+spec/instances-extra-info
-
-Problem description
-===================
-We can only query the id, name and type of a protectable instance using
-the protectable instances RESTful API; we cannot get any further
-information about the resource instance.
-Other information about an instance is also needed when we protect
-resources of different resource types.
-For example, for a database instance, the name alone is not enough: the
-host IP and the database system name of the instance are also needed.
-
-
-Use Cases
-=========
-
-Scenario #1
-Users want to get extra information about resource instances from the
-response of the protectable instances API. Currently the protectable
-instances API only returns the id, name and type of the resource
-instances.
-
-Scenario #2
-A user uses the Protectable Instances API to query instance information
-from the vendor's backup software. The user can also save the extra_info
-of resource instances to the plan, not only the id, name and type of the
-resources.
-
-
-Proposed change
-===============
-Protectable Instances API:
-When returning a protectable instance, a new field called ``extra-info``
-will be available.
-This field must be a dict in the format of::
-
-    {
-        "key1": "value1",
-        "key2": "value2",
-    }
-
-Keys and values *must* both be strings.
-The extra-info of instances is only used for presentation to a
-user/tenant.
-The values in the extra-info field of a resource cannot be used inside
-the protection service of Karbor or the protection plugins.
-
-The UI for the extra-info of protectable instances:
-Show the extra-info on the resource tree page. Add a fa-chevron-right
-icon before the logo of the resource. The extra-info of the resource is
-collapsed by default.
-If a user/tenant clicks the icon, the extra-info is displayed under the
-resource; clicking the icon again collapses the extra-info.
-
-Add a new field extra_info to the response of the Protectable Instances
-API::
-
-  /{project_id}/protectables/{protectable_type}/instances:
-    get:
-      summary: Resource Instances
-      description: |
-        Return all the available instances for the given protectable type.
-      examples:
-        application/json: {
-          "instances": [
-            {
-              "id": "cb4ef2ff-10f5-46c9-bce4-cf7a49c65a01",
-              "type": "OS::Nova::Server",
-              "name": "My VM",
-              "extra_info": {
-                "hostname": "KarborServer",
-                "availability_zone": "AZOne",
-                "cell_name": "CellOne"
-              },
-              "dependent_resources": [
-                {
-                  "id": "99777fdd-8a5b-45ab-ba2c-52420008103f",
-                  "type": "OS::Glance::Image",
-                  "name": "cirros-0.3.4-x86_64-uec",
-                  "extra_info": {
-                    "availability_zone": "AZOne",
-                    "cell_name": "CellOne"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-
-Protectable plugins can return the extra_info of resource instances.
-
-The extra_info field in the resources database table is only for
-presentation to a user/tenant. Its values cannot be used or modified in
-the Karbor protection service.
-Add a new field extra_info to the resources database table:
-
-+-------------------------+--------------+------+-----+---------+-------+
-| Field                   | Type         | Null | Key | Default | Extra |
-+=========================+==============+======+=====+=========+=======+
-| id                      | Integer      | NO   | PRI | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| plan_id                 | varchar(255) | NO   | FOR | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| resource_id             | varchar(36)  | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| resource_type           | varchar(64)  | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| resource_name           | varchar(255) | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| resource_extra_info     | Text         | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| created_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| updated_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| deleted_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| deleted                 | Boolean      | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-
-
-
-Alternatives
-------------
-
-Do nothing, this is not a mission critical feature.
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-Add a new field extra_info to the response of the Protectable Instances
-API.
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-The new API will be exposed to users via the python-karborclient.
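-
-For example, a client might read the new field roughly like this; treat
-the exact ``list_instances`` signature as an assumption::
-
-    # 'karbor' is an authenticated python-karborclient Client.
-    instances = karbor.protectables.list_instances('OS::Nova::Server')
-    for instance in instances:
-        # extra_info is presentation-only metadata, e.g. hostname or
-        # availability zone; it may be absent for some plugins.
-        print(instance.name, getattr(instance, 'extra_info', {}))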
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Write API
-* Add to Karbor client
-* Write tests
-* Add a usage example for the API
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor and the python-karborclient.
-
-
-Documentation Impact
-====================
-
-Add a usage example for the API.
-
-
-References
-==========
-
-None
diff --git a/doc/source/specs/instances-parameters.rst b/doc/source/specs/instances-parameters.rst
deleted file mode 100644
index 543cc188..00000000
--- a/doc/source/specs/instances-parameters.rst
+++ /dev/null
@@ -1,175 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-==================================================
-Add parameters field for protectable instances API
-==================================================
-
-https://blueprints.launchpad.net/cinder/+spec/custom-checkpoint-metadata
-
-Problem description
-===================
-
-Currently, resource instances can only be queried from the default
-region. If there are several regions in one site/keystone, we cannot
-query resource instances from a different region endpoint. We need a
-parameter for the region name.
-
-Consider the database protection scenario: if we want to use the
-Protectable Instances API to query database instances from a vendor's
-backup software, we must pass some authentication parameters to the
-RESTful API of the vendor's backup software.
-
-We should therefore add a dict-typed parameter to the Protectable
-Instances API, whose keys and values carry whatever is needed to
-implement certain protectable plugins.
-
-
-Use Cases
-=========
-
-Scenario #1
-Users need a parameter for the region name, in order to query resource
-instances from a different region endpoint.
-
-Scenario #2
-A user uses the Protectable Instances API to query database instances
-from the vendor's backup software. The user must provide some
-authentication parameters to the RESTful API of the vendor's backup
-software.
-
-A dict-typed parameter is needed for the Protectable Instances API, and
-it is optional.
-
-Proposed change
-===============
-
-Add a new field ``parameters`` to the request params of the Protectable
-Instances API::
-
-  /{project_id}/protectables/{protectable_type}/instances:
-    get:
-      summary: Resource Instances
-      description: |
-        Return all the available instances for the given protectable type.
-      parameters:
-        - $ref: '#/parameters/projectParam'
-        - $ref: '#/parameters/protectable_typeParam'
-        - $ref: '#/parameters/nameFilterParam'
-        - $ref: '#/parameters/sortParam'
-        - $ref: '#/parameters/limitParam'
-        - $ref: '#/parameters/markerParam'
-        - $ref: '#/parameters/ParametersParam'
-
-The request params are a dictionary-like object containing the
-parameters from both the query string and the request body.
-
-The parameters data is converted into the query string of the API.
-
-For example::
-
-    "parameters": {
-        "region_name": "USA"
-    }
-
-Add the resulting query string to the Protectable Instances API::
-
-    /{project_id}/protectables/{protectable_type}/instances?parameters=%7B%27region_name%27%3A+%27USA%27%7D
-
-
-
-
-Alternatives
-------------
-
-Do nothing, this is not a mission critical feature.
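-
-As a side note to the proposed change above, the documented query string
-is plain URL encoding of the stringified parameters dict; the snippet
-below merely reproduces it and is illustrative, not normative::
-
-    from urllib.parse import quote_plus
-
-    parameters = {'region_name': 'USA'}
-    query = 'parameters=' + quote_plus(str(parameters))
-    # -> parameters=%7B%27region_name%27%3A+%27USA%27%7D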
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-Add a new field parameters to the params of the request for the Protectable
-Instances API::
-
-    /{project_id}/protectables/{protectable_type}/instances:
-      get:
-        summary: Resource Instances
-        description: |
-          Return all the available instances for the given protectable type.
-        parameters:
-          - $ref: '#/parameters/projectParam'
-          - $ref: '#/parameters/protectable_typeParam'
-          - $ref: '#/parameters/nameFilterParam'
-          - $ref: '#/parameters/sortParam'
-          - $ref: '#/parameters/limitParam'
-          - $ref: '#/parameters/markerParam'
-          - $ref: '#/parameters/ParametersParam'
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-The new API will be exposed to users via the python-karborclient.
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Write API
-* Add to Karbor client
-* Write tests
-* Add a usage example for API
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor and the python-karborclient.
-
-
-Documentation Impact
-====================
-
-Add a usage example for API.
-
-
-References
-==========
-
-None
diff --git a/doc/source/specs/kubernetes-pods-protection-plugin.rst b/doc/source/specs/kubernetes-pods-protection-plugin.rst
deleted file mode 100644
index 3015f805..00000000
--- a/doc/source/specs/kubernetes-pods-protection-plugin.rst
+++ /dev/null
@@ -1,216 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-==============================================================================
-The kubernetes pod with persistent volumes protectable and protection plugins
-==============================================================================
-
-https://blueprints.launchpad.net/karbor/+spec/kubernetes-pods-protection-plugin
-
-Problem description
-===================
-
-With the rapid development of cloud computing, there has been explosive
-growth in cloud data over recent years. Cloud data backup and recovery has
-become an urgent topic of customer concern. Running Kubernetes on OpenStack
-is becoming more and more popular, and the data protection of applications
-running on Kubernetes also needs to be considered.
-
-In this spec we would like to introduce a plugin in Karbor to protect an
-application deployed in Kubernetes which runs on top of OpenStack. The
-application data protected by Karbor includes the configuration and metadata
-in the etcd service, and the persistent volumes provided by Cinder.
-
-
-Use Cases
-=========
-
-A kubernetes cluster can run on OpenStack instances using the OpenStack cloud
-provider, and its pods can be created with persistent volumes provided by
-Cinder. This blueprint adds a kubernetes pod (with persistent volumes)
-protection plugin to Karbor.
-
-Proposed change
-===============
-
-The kubernetes pod protectable plugin:
---------------------------------------
-A new protectable plugin for the kubernetes pod needs to be implemented.
-The resource type of the kubernetes pod is "OS::Kubernetes::Pod". It will be
-added to the constant RESOURCE_TYPES in Karbor.
-
-
-1. The parent resource types: PROJECT_RESOURCE_TYPE
-
-2. list the resources:
-
-   This interface of the plugin will call the 'list_pod_for_all_namespaces'
-   API method in the kubernetes python client[1] (see the client sketch
-   before the References section below).
-
-3. show the resource:
-
-   This interface of the plugin will call the 'read_namespaced_pod' API
-   method in the kubernetes python client. The parameter is a pod id.
-
-4. get dependent resources:
-
-   The parameter parent_resource is a project; this interface of the plugin
-   will return the kubernetes pods in this project.
-
-The volume protectable plugin:
-------------------------------
-1. Add a new parent resource type: "OS::Kubernetes::Pod"
-
-2. get dependent resources:
-
-   The parameter parent_resource is a kubernetes pod; this interface of the
-   plugin will return the list of persistent volumes provided by Cinder in
-   this parent pod.
-
-
-The kubernetes pod protection plugin
-------------------------------------
-A new protection plugin for the kubernetes pod needs to be implemented.
-
-1. Protect Operation:
-
-   The configuration and metadata about the pod in the etcd service will be
-   saved to the bank of Karbor.
-
-2. Restore Operation:
-
-   The persistent volumes of the pod will be restored by the Cinder volume
-   plugins.
-
-   Get the configuration and metadata about the pod in the etcd service from
-   the bank, and create a new pod in the kubernetes cluster with the
-   persistent volumes restored from Cinder.
-
-3. Delete Operation:
-
-   The configuration and metadata about the pod will be deleted from the
-   bank. The backup data of the persistent volumes will be deleted from
-   Cinder.
-
-The kubernetes pod protection plugin schema:
---------------------------------------------
-
-::
-
-    OPTIONS_SCHEMA = {
-        "title": "The kubernetes pod Protection Options",
-        "type": "object",
-        "properties": {
-            "backup_name": {
-                "type": "string",
-                "title": "Backup Name",
-                "description": "The name of the kubernetes pod backup."
-            },
-            "description": {
-                "type": "string",
-                "title": "Description",
-                "description": "The description of the kubernetes pod backup."
-            }
-        },
-        "required": ["backup_name", "description"]
-    }
-
-    RESTORE_SCHEMA = {
-        "title": "The kubernetes pod Protection Restore",
-        "type": "object",
-        "properties": {
-            "restore_name": {
-                "type": "string",
-                "title": "Restore Name",
-                "description": "The name of the restored kubernetes pod.",
-                "default": None
-            },
-            "restore_description": {
-                "type": "string",
-                "title": "Restore Description",
-                "description": "The description of the restored kubernetes pod.",
-                "default": None
-            }
-        }
-    }
-
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-Add the kubernetes pod protection plugin endpoint to setup.cfg.
-Add the kubernetes pod protection plugin configuration to the provider file.
-
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Write kubernetes pod backup protectable plugin
-* Write kubernetes pod backup protection plugin
-* Write tests
-* Add a usage example about kubernetes pod protection
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-Add a usage example about kubernetes pod protection.
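-A minimal sketch of the kubernetes python client[1] calls named in the
-protectable plugin above (the cluster config loading and the pod
-name/namespace are illustrative assumptions)::
-
-    from kubernetes import client, config
-
-    config.load_kube_config()  # assumes a local kubeconfig, for illustration
-    v1 = client.CoreV1Api()
-
-    # "list the resources"
-    pods = v1.list_pod_for_all_namespaces(watch=False)
-    for pod in pods.items:
-        print(pod.metadata.namespace, pod.metadata.name)
-
-    # "show the resource"
-    pod = v1.read_namespaced_pod(name="demo-pod", namespace="default")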
-
-
-References
-==========
-
-[1] https://github.com/kubernetes-incubator/client-python
diff --git a/doc/source/specs/manila-share-snapshot-plugins.rst b/doc/source/specs/manila-share-snapshot-plugins.rst
deleted file mode 100644
index 6eec788f..00000000
--- a/doc/source/specs/manila-share-snapshot-plugins.rst
+++ /dev/null
@@ -1,210 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-===================================================
-Manila share protectable and protection plugins
-===================================================
-
-https://blueprints.launchpad.net/karbor/+spec/manila-share-proection-plugin
-
-Problem description
-===================
-
-The shares managed by Manila cannot currently be protected by Karbor.
-Manila allows the user to create snapshots of a share, so share protection
-can be introduced to karbor by taking snapshots of the share.
-
-
-Use Cases
-=========
-
-A user creates a share in Manila and mounts it to a server. The share is then
-used to store a large amount of file data. To avoid losing this data, the
-user wants to protect the share by taking periodic snapshots of it.
-If the user wants to restore the share, a new share can be created from a
-snapshot.
-
-Proposed change
-===============
-
-Manila share protectable plugin:
---------------------------------
-A new protectable plugin for the Manila share needs to be implemented.
-The resource type of the share is "OS::Manila::Share". It will be added to
-the constant RESOURCE_TYPES in karbor.
-
-
-1. The parent resource types:
-
-   PROJECT_RESOURCE_TYPE.
-
-2. list the resources:
-
-   This interface of the plugin will call the 'list' method of ShareManager
-   in manilaclient.
-
-3. show the resource:
-
-   This interface of the plugin will call the 'get' method of ShareManager
-   in manilaclient. The parameter is a share id.
-
-4. get dependent resources:
-
-   The parameter parent_resource is a project; this interface of the plugin
-   will return the shares in this project.
-
-
-Manila share protection plugin
---------------------------------
-A new protection plugin for the Manila share needs to be implemented.
-
-1. Protect Operation:
-
-   The 'create' method of ShareSnapshotManager will be called in the main
-   hook of this operation to make a snapshot of the share.
-
-2. Restore Operation:
-
-   The 'create' method of ShareManager will be called in the main hook of
-   this operation to create a new share from the given snapshot.
-
-3. Delete Operation:
-
-   The share snapshot will be deleted.
-   The 'delete' method of ShareSnapshotManager will be called in the main
-   hook of this operation to delete the share snapshot.
-
-Manila share protection plugin schema:
---------------------------------------
-
-::
-
-    OPTIONS_SCHEMA = {
-        "title": "Share Protection Options",
-        "type": "object",
-        "properties": {
-            "snapshot_name": {
-                "type": "string",
-                "title": "Snapshot Name",
-                "description": "The name of the snapshot."
-            },
-            "description": {
-                "type": "string",
-                "title": "Description",
-                "description": "The description of the share snapshot."
-            },
-            "force": {
-                "type": "boolean",
-                "title": "Force",
-                "description": "Optional flag to indicate whether to snapshot a share even if it's busy.",
-                "default": False
-            }
-        },
-        "required": ["snapshot_name", "description", "force"]
-    }
-
-    RESTORE_SCHEMA = {
-        "title": "Share Protection Restore",
-        "type": "object",
-        "properties": {
-            "share_id": {
-                "type": "string",
-                "title": "Share ID",
-                "description": "The target share ID to restore to."
-            },
-            "restore_name": {
-                "type": "string",
-                "title": "Restore Name",
-                "description": "The name of the restored share.",
-                "default": None
-            },
-            "restore_description": {
-                "type": "string",
-                "title": "Restore Description",
-                "description": "The description of the restored share.",
-                "default": None
-            }
-        }
-    }
-
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-Add the share protection plugin endpoint to setup.cfg.
-Add the share protection plugin configuration to the provider file.
-
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Write share snapshot protectable plugin
-* Write share snapshot protection plugin
-* Write tests
-* Add a usage example about share protection
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-Add a usage example about share protection.
-
-
-References
-==========
-
-None
diff --git a/doc/source/specs/operation-engine/create_scheduled_operation_seq_diagram.pu b/doc/source/specs/operation-engine/create_scheduled_operation_seq_diagram.pu
deleted file mode 100644
index 8b6d0e68..00000000
--- a/doc/source/specs/operation-engine/create_scheduled_operation_seq_diagram.pu
+++ /dev/null
@@ -1,56 +0,0 @@
-@startuml
-
-title create scheduled operation
-hide footbox
-
-actor User
-participant API_Service as AS
-participant MQ
-participant OperationEngine as OE
-participant Engine.Trigger as engine
-participant DB
-
-User -> AS:create scheduled operation
-activate AS
-
-AS --> MQ:create scheduled operation
-activate MQ
-
-MQ -> OE:create scheduled operation
-activate OE
-
-OE -> DB:create operation and set status to init
-activate DB
-
-DB --> OE:finish
-deactivate DB
-
-OE -> DB:get trigger info
-activate DB
-
-DB --> OE:trigger info
-deactivate DB
-
-OE -> engine:register operation
-activate engine
-
-engine --> OE:finish
-deactivate engine
-
-OE -> DB:update operation's status to registered
-activate DB
-
-DB --> OE:finish
-deactivate DB
-
-OE -> MQ:finish
-deactivate OE
-
-MQ -> AS:finish
-deactivate MQ
-
-AS --> User:finish
-deactivate AS
-
-@enduml
-
diff --git a/doc/source/specs/operation-engine/delete_scheduled_operation_seq_diagram.pu b/doc/source/specs/operation-engine/delete_scheduled_operation_seq_diagram.pu
deleted file mode 100644
index 143fe595..00000000
--- a/doc/source/specs/operation-engine/delete_scheduled_operation_seq_diagram.pu
+++ /dev/null
@@ -1,43 +0,0 @@
-@startuml
-
-title delete scheduled operation
-hide footbox
-
-actor User
-participant API_Service as AS
-participant OperationEngine as OE
-participant Engine.Trigger as engine
-participant DB
-
-User -> AS:delete scheduled operation
-activate AS
-
-AS -> OE:delete scheduled operation
-activate OE
-
-OE -> DB:update operation's status to deleted
-activate DB
-
-DB --> OE:finish
-deactivate DB
-
-OE -> engine:unregister operation
-activate engine
-
-engine --> OE:finish
-deactivate engine
-
-OE -> DB:delete operation
-activate DB
-
-DB --> OE:finish
-deactivate DB
-
-OE -> AS:finish
-deactivate OE
-
-AS --> User:finish
-deactivate AS
-
-@enduml
-
diff --git a/doc/source/specs/operation-engine/operation_engine_class_diagram.pu b/doc/source/specs/operation-engine/operation_engine_class_diagram.pu
deleted file mode 100644
index 4911c7df..00000000
--- a/doc/source/specs/operation-engine/operation_engine_class_diagram.pu
+++ /dev/null
@@ -1,79 +0,0 @@
-@startuml
-
-title OperationEngine class diagram
-
-class OperationEngineManager
-abstract class BaseTrigger
-abstract class Executor
-class OperationManager
-class ScheduledOperation
-class TimeTrigger
-class EventTrigger
-interface Operation
-class OperationExeInfo
-class Trigger
-class ProtectOperation
-class DeleteCheckpointOperation
-
-OperationEngineManager *-- BaseTrigger
-OperationEngineManager *-- Executor
-OperationEngineManager *-- OperationManager
-BaseTrigger -- ScheduledOperation
-BaseTrigger "1" o-- "1" Executor
-Executor -- ScheduledOperation
-Executor -- OperationManager
-Executor -- OperationExeInfo
-OperationManager *-- Operation
-BaseTrigger <|-- TimeTrigger
-BaseTrigger <|-- EventTrigger
-ScheduledOperation -- Trigger
-Operation <|-- ProtectOperation
-Operation <|-- DeleteCheckpointOperation
-
-abstract class BaseTrigger {
-    _executor: Executor
-
-    register_operation(op_id, op_name, **kwargs)
-    unregister_operation(op_id, op_name)
-}
-
-abstract class Executor {
-    submit_op(op_id, info)
-}
-
-class ScheduledOperation {
-    id: uuid
-    name: string
-    operation_type: string
-    trigger_id: uuid
-    operation_definition: dict
-}
-note left: class of DB table scheduled_operations
-
-class Trigger {
-    id: uuid
-    type: string
-    properties: dict
-}
-note left: class of DB table triggers
-
-class OperationManager {
-    operation_obj_map: dict
-
-    check_operation_definition(op_type, operation_definition)
-    execute_operation(op_type, operation_definition, operation_exe_info)
-}
-
-interface Operation {
-    check_operation_definition(operation_definition)
-    execute(operation_definition, operation_exe_info)
-}
-
-class OperationExeInfo {
-    id: uuid
-    extend_info: string
-}
-note "class of DB table operation_exe_infos" as N1
-OperationExeInfo -- N1
-
-@enduml
diff --git a/doc/source/specs/operation-engine/operation_engine_design.rst b/doc/source/specs/operation-engine/operation_engine_design.rst
deleted file mode 100644
index 929f7698..00000000
--- a/doc/source/specs/operation-engine/operation_engine_design.rst
+++ /dev/null
@@ -1,140 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-
-================
-Operation Engine
-================
-
-https://blueprints.launchpad.net/karbor/+spec/operation-engine-design
-
-The Operation Engine is one of the components of Karbor; it is responsible
-for triggering operations to execute when their time is up or an event
-happens.
-
-Problem description
-===================
-1. Define the operations and triggers (time or event)
-2. Bind the operation with a trigger and activate the trigger
-3. Define the executor which will run the operations
-4. Ensure the high availability of the Operation Engine service
-
-.. image:: ../../../images/operation-engine/operation_engine_architecture_diagram.png
-   :height: 300px
-   :align: right
-
-Use Cases
----------
-1. CRUD operations and triggers
-
-Proposed Change
-===============
-
-Data model impact
------------------
-There are 5 relevant tables in the DB:
-
-1. triggers
-2. services
-3. scheduled_operations
-
-   These three tables are defined in
-   `https://blueprints.launchpad.net/openstack/?searchtext=api-service-design`.
-   Please see that bp for the details.
-
-4. scheduled_operation_states
-
-.. image:: ../../../images/operation-engine/operation_state_diagram.png
-   :height: 500px
-   :align: right
-
-+--------------------+--------------+------+-----+---------+----------------+
-| Field              | Type         | Null | Key | Default | Extra          |
-+====================+==============+======+=====+=========+================+
-| created_at         | datetime     | YES  |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| updated_at         | datetime     | YES  |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| deleted_at         | datetime     | YES  |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| id                 | int(11)      | NO   | PRI | NULL    | auto_increment |
-+--------------------+--------------+------+-----+---------+----------------+
-| operation_id       | varchar(36)  | NO   |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| service_id         | int(11)      | NO   |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| state              | varchar(32)  | NO   |     | NULL    | values:        |
-|                    |              |      |     |         |                |
-|                    |              |      |     |         | - init         |
-|                    |              |      |     |         | - registered   |
-|                    |              |      |     |         | - triggered    |
-|                    |              |      |     |         | - running      |
-|                    |              |      |     |         | - deleted      |
-|                    |              |      |     |         |                |
-+--------------------+--------------+------+-----+---------+----------------+
-| deleted            | tinyint(1)   | NO   |     | NULL    |                |
-+--------------------+--------------+------+-----+---------+----------------+
-
-FOREIGN KEY(operation_id) REFERENCES scheduled_operations(id)
-FOREIGN KEY(service_id) REFERENCES Services(id)
-
-
-5. scheduled_operation_logs
-
-+--------------------+--------------+------+-----+---------+--------------------------+
-| Field              | Type         | Null | Key | Default | Extra                    |
-+====================+==============+======+=====+=========+==========================+
-| created_at         | datetime     | YES  |     | NULL    |                          |
-+--------------------+--------------+------+-----+---------+--------------------------+
-| updated_at         | datetime     | YES  |     | NULL    |                          |
-+--------------------+--------------+------+-----+---------+--------------------------+
-| deleted_at         | datetime     | YES  |     | NULL    |                          |
-+--------------------+--------------+------+-----+---------+--------------------------+
-| id                 | int(11)      | NO   | PRI | NULL    | auto_increment           |
-+--------------------+--------------+------+-----+---------+--------------------------+
-| operation_id       | varchar(36)  | NO   |     | NULL    |                          |
-+--------------------+--------------+------+-----+---------+--------------------------+
-| expect_start_time  | datetime     | YES  |     | NULL    |                          |
-+--------------------+--------------+------+-----+---------+--------------------------+
-| triggered_time     | datetime     | YES  |     | NULL    |                          |
-+--------------------+--------------+------+-----+---------+--------------------------+
-| actual_start_time  | datetime     | YES  |     | NULL    |                          |
-+--------------------+--------------+------+-----+---------+--------------------------+
-| end_time           | datetime     | YES  |     | NULL    |                          |
-+--------------------+--------------+------+-----+---------+--------------------------+
-| state              | varchar(32)  | NO   |     | NULL    | values:                  |
-|                    |              |      |     |         |                          |
-|                    |              |      |     |         | * in_progress            |
-|                    |              |      |     |         | * success                |
-|                    |              |      |     |         | * failed                 |
-|                    |              |      |     |         | * dropped_out_of_window  |
-|                    |              |      |     |         |                          |
-+--------------------+--------------+------+-----+---------+--------------------------+
-| extend_info        | Text         | YES  |     | NULL    | execution info           |
-+--------------------+--------------+------+-----+---------+--------------------------+
-| deleted            | tinyint(1)   | NO   |     | NULL    |                          |
-+--------------------+--------------+------+-----+---------+--------------------------+
-
-FOREIGN KEY(operation_id) REFERENCES scheduled_operations(id)
-
-An example row:
-
-+---------------------+---------------------+---------------------+-----+----------------------------------+---------------------+---------------------+---------------------+---------------------+------------------+---------------------+---------------------+
-| created_at          | updated_at          | deleted_at          | id  | operation_id                     | expect_start_time   | triggered_time      | actual_start_time   | end_time            | state            | extend_info         | deleted             |
-+=====================+=====================+=====================+=====+==================================+=====================+=====================+=====================+=====================+==================+=====================+=====================+
-| 2016-01-01 01:00:02 | 2016-01-01 01:00:07 | NULL                | 0   | 0354ca9ddcd046b693340d78759fd274 | 2016-01-01 01:00:00 | 2016-01-01 01:00:02 | 2016-01-01 01:00:05 | 2016-01-01 01:00:07 | success          | NULL                | 0                   |
-+---------------------+---------------------+---------------------+-----+----------------------------------+---------------------+---------------------+---------------------+---------------------+------------------+---------------------+---------------------+
-
-Class Diagram
--------------
-
-.. image:: ../../../images/operation-engine/operation_engine_class_diagram.png
-   :height: 600px
-
-Flow
-----
-
-.. image:: ../../../images/operation-engine/create_scheduled_operation_seq_diagram.png
-   :height: 400px
-
-.. image:: ../../../images/operation-engine/delete_scheduled_operation_seq_diagram.png
-   :height: 400px
-
diff --git a/doc/source/specs/operation-engine/operation_state_diagram.pu b/doc/source/specs/operation-engine/operation_state_diagram.pu
deleted file mode 100644
index ccb5e9e9..00000000
--- a/doc/source/specs/operation-engine/operation_state_diagram.pu
+++ /dev/null
@@ -1,20 +0,0 @@
-@startuml
-
-[*] --> init: create
-
-init --> registered: register to triggers
-
-registered --> triggered: trigger and submit to executor
-
-triggered --> running: executor runs it
-
-running --> registered: finish
-
-init --> deleted: delete
-registered --> deleted: delete
-triggered --> deleted: delete
-running --> deleted: delete
-
-deleted --> [*]
-
-@enduml
diff --git a/doc/source/specs/operation-log-api.rst b/doc/source/specs/operation-log-api.rst
deleted file mode 100644
index ed6a4c44..00000000
--- a/doc/source/specs/operation-log-api.rst
+++ /dev/null
@@ -1,257 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-================================================
-Add operation log API about protection to Karbor
-================================================
-
-https://blueprints.launchpad.net/karbor/+spec/operation-log-api
-
-Problem description
-===================
-
-#usecase 1
-The resources in a plan can be protected automatically or manually by the end
-user via the checkpoint POST API, after which the checkpoints (the metadata
-and backup data) are created.
-
-The checkpoints can also be deleted manually by the end user via the
-checkpoint DELETE API. In the bp **Checkpoint scheduled clean**, the
-parameters max_backups and retention_duration will be introduced to karbor,
-so checkpoints can also be deleted automatically.
-
-But if the end user wants to query deleted protection logs in history, they
-cannot get these deleted protection logs via the checkpoint RESTful API.
-
-#usecase 2
-Users also want to query the logs of operations like restore, delete, etc.
-via one RESTful API.
-
-Use Cases
-=========
-
-Users want to query all the operation logs via a uniform RESTful API,
-including the logs for every operation type (protect/restore/delete).
-
-So the operation logs not only include the logs for checkpoints, but also the
-logs for restore and delete operations.
-
-Proposed change
-===============
-
-The Checkpoint is responsible for the model of backup data, recording all the
-metadata of the backup data over its whole lifecycle; it is not responsible
-for the model of the protection log. So Karbor needs to expose a RESTful API
-that lets the end user query all the protection logs about a plan, including
-available, error and deleted log records.
-About this patch: Add operation log endpoints to the API document[1]. We have
-a plan to add an operation log API to Karbor, but that patch only considers
-the situation where checkpoints are created automatically via the
-scheduled_operation API. We also need to consider the situation where
-checkpoints are created manually, directly via the checkpoint API. We need to
-redesign the operation log API to meet the above requirements.
-
-We also need to consider operation logs for restore and delete, so we add a
-field operation_type to the data module. Its value can be protect, delete or
-restore.
-
-The "extra_info" field of the operation log data module can be used by the
-vendor's plugin for saving detailed information.
-For example, the job/task id and job/task description of this protect
-operation, coming from the plugin's backend (the backup software), can be
-saved to this field. The tenant and admin can then query this detailed
-information about the operation from the backup software via the operation
-log API.
-
-
-1. The statuses of an operation log::
-
-    OPERATION_LOGS_PROTECTING = 'protecting'
-    OPERATION_LOGS_AVAILABLE = 'available'
-    OPERATION_LOGS_STATUS_ERROR = 'error'
-    OPERATION_LOGS_DELETING = 'deleting'
-    OPERATION_LOGS_DELETED = 'deleted'
-    OPERATION_LOGS_RESTORING = 'restoring'
-    OPERATION_LOGS_RESTORED = 'restored'
-    OPERATION_LOGS_ERROR_DELETING = 'error-deleting'
-    OPERATION_LOGS_ERROR_RESTORING = 'error-restoring'
-
-2. Create
-
-The checkpoint can be created manually, directly via the checkpoint API. In
-this situation, the scheduled_operation_id field of the operation_log
-versioned object is None.
-When the checkpoint is created automatically via the scheduled_operation API,
-the value of scheduled_operation_id can be obtained from the 'extra-info' of
-the checkpoint POST API and set on the field of the operation_log versioned
-object.
-The operation_log object will be created after the checkpoint object is
-created, in the RPC method 'protect' of the protection service manager.
-
-When a checkpoint is deleted manually, directly via the checkpoint API, an
-operation_log object with the delete operation type needs to be created.
-
-When a checkpoint is restored manually, directly via the checkpoint API, an
-operation_log object with the restore operation type needs to be created.
-
-
-3. Update the status of the operation_log
-
-If the checkpoint has not been created successfully in the protect flow, the
-status of the operation_log object will be set to 'error' in the 'revert'
-method of InitiateProtectTask. The status of the operation_log object will be
-set to 'available' in the CompleteProtectTask, and the end_time of the object
-will also be updated.
-
-When the user deletes a checkpoint, the status of the operation_log object
-will be set to 'deleted' after the checkpoint is deleted.
-
-When the user restores a checkpoint, the status of the operation_log object
-will be set to 'restored' after the checkpoint is restored.
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-+-------------------------+--------------+------+-----+---------+-------+
-| Field                   | Type         | Null | Key | Default | Extra |
-+-------------------------+--------------+------+-----+---------+-------+
-| id                      | varchar(36)  | NO   | PRI | NULL    |       |
-| project_id              | varchar(255) | NO   |     | NULL    |       |
-| operation_type          | varchar(255) | NO   |     | NULL    |       |
-| checkpoint_id           | varchar(36)  | YES  |     | NULL    |       |
-| plan_id                 | varchar(36)  | YES  |     | NULL    |       |
-| provider_id             | varchar(36)  | YES  |     | NULL    |       |
-| restore_id              | varchar(36)  | YES  |     | NULL    |       |
-| scheduled_operation_id  | varchar(36)  | YES  |     | NULL    |       |
-| status                  | varchar(64)  | YES  |     | NULL    |       |
-| started_at              | Datetime     | YES  |     | NULL    |       |
-| ended_at                | Datetime     | YES  |     | NULL    |       |
-| error_info              | Text         | YES  |     | NULL    |       |
-| extra_info              | Text         | YES  |     | NULL    |       |
-| created_at              | Datetime     | YES  |     | NULL    |       |
-| updated_at              | Datetime     | YES  |     | NULL    |       |
-| deleted_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-
-REST API impact
----------------
-
-1. List operation_logs API (a request sketch using this endpoint follows the
-   Testing section below)
-
-The response JSON when listing operation logs::
-
-    **get** : /v1/{project_id}/providers/{provider_id}/operation_logs
-
-    {
-        "operation_logs":[
-            {
-                "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7",
-                "project_id": "e486a2f49695423ca9c47e589b948108",
-                "operation_type": "protect",
-                "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3",
-                "plan_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",
-                "provider_id": "23902b02-5666-4ee6-8dfe-962ac09c3994",
-                "restore_id": null,
-                "scheduled_operation_id": "23902b02-5666-4ee6-8dfe-962ac09c3991",
-                "started_at": "2015-08-27T09:50:58-05:00",
-                "ended_at": "2015-08-27T10:50:58-05:00",
-                "status": "protecting",
-                "error_info": "Could not access bank",
-                "extra_info": {
-                    "tsm_job_id": 10,
-                    "rate": 20
-                }
-            }
-        ]
-    }
-
-
-2. Show operation_logs API
-
-The response JSON when showing an operation log::
-
-    **get** : /v1/{project_id}/providers/{provider_id}/operation_logs/{operation_log_id}
-
-    {
-        "operation_log":{
-            "id": "22b82aa7-9179-4c71-bba2-caf5c0e68db7",
-            "project_id": "e486a2f49695423ca9c47e589b948108",
-            "operation_type": "protect",
-            "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3",
-            "plan_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",
-            "provider_id": "23902b02-5666-4ee6-8dfe-962ac09c3994",
-            "restore_id": null,
-            "scheduled_operation_id": "23902b02-5666-4ee6-8dfe-962ac09c3991",
-            "started_at": "2015-08-27T09:50:58-05:00",
-            "ended_at": "2015-08-27T10:50:58-05:00",
-            "status": "protecting",
-            "error_info": "Could not access bank",
-            "extra_info": {
-                "tsm_job_id": 10,
-                "rate": 20
-            }
-        }
-    }
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Add a new RESTful API for operation logs
-* Add the operation log database data module
-* Add operation logs to the karbor client
-
-Dependencies
-============
-
-
-
-Testing
-=======
-
-Unit tests in Karbor.
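-A minimal request sketch for the list endpoint defined above, using the plain
-``requests`` library (the base URL, ids and token are placeholders; the
-provider id is the example value from this spec)::
-
-    import requests
-
-    BASE = "http://karbor-api:8799/v1/<project_id>"
-    PROVIDER = "23902b02-5666-4ee6-8dfe-962ac09c3994"
-
-    resp = requests.get(
-        BASE + "/providers/" + PROVIDER + "/operation_logs",
-        headers={"X-Auth-Token": "<keystone-token>"})
-    for log in resp.json()["operation_logs"]:
-        print(log["operation_type"], log["status"])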
-
-
-Documentation Impact
-====================
-
-None
-
-References
-==========
-
-[1] https://review.opendev.org/#/c/298060/
-
diff --git a/doc/source/specs/pluggable_protection_provider.pu b/doc/source/specs/pluggable_protection_provider.pu
deleted file mode 100644
index e30c5209..00000000
--- a/doc/source/specs/pluggable_protection_provider.pu
+++ /dev/null
@@ -1,126 +0,0 @@
-@startuml
-
-title Pluggable Protection Provider
-
-class ResourceType extends String {
-
-}
-
-class Resource {
-    +type: ResourceType
-    +id: UUID
-}
-
-class ResourceGraphNode {
-    resource: Resource
-    dependent_resources: []ResourceGraphNode
-}
-
-ResourceGraphNode "1" o- "0..*" ResourceGraphNode
-
-class ResourceGraphWalker {
-    +constructor(sources: []ResourceGraphNode)
-    +add_listener(listener: ResourceGraphWalkerListener)
-    +walk()
-}
-
-ResourceGraphWalker -- ResourceGraphWalkerListener
-
-interface ResourceGraphWalkerListener {
-    on_node_enter(node: ResourceGraphNode, is_first_visit: boolean)
-    on_node_exit(node: ResourceGraphNode, is_first_visit: boolean)
-}
-
-ResourceGraphNode *- Resource
-
-class ProtectableRegistry {
-    + {static} fetch_dependant_resources(resource: Resource): []Resource
-    + {static} register(resource_type: ResourceType, protectable: Protectable)
-    + {static} list_resources(resource_type: ResourceType): []Resource
-}
-
-ProtectableRegistry --> Resource: <<creates>>
-
-ProtectableRegistry "1" *- "*" Protectable
-
-interface Protectable {
-    + possible_parent_types(resource_type: ResourceType): []ResourceType
-    + fetch_child_resources(resource: Resource): []Resource
-    + list_resources(resource_type: ResourceType): []Resource
-}
-
-Resource *- ResourceType
-
-enum Operation {
-    protect
-    start
-    suspend
-    restore
-    delete
-}
-
-class Context {
-    +plan: ProtectionPlan
-    +operation: Operation
-    +parameters: dict
-    +resource: ResourceGraphNode
-    +bank_section: BankSection
-    +is_first_visit: boolean
-    +task_builder: TaskBuilder
-}
-
-interface BankPlugin {
-
-}
-
-interface BankSection extends BankPlugin {
-    is_writeable(): bool
-}
-
-Context *-- TaskBuilder
-Context *-- BankSection
-
-interface Task {
-
-}
-
-note left of Task
-    Opaque object
-end note
-
-interface TaskBuilder {
-    add_task(target: function, args=collection): Task
-    link_tasks(a: Task, b: Task)
-}
-
-TaskBuilder --> Task: Creates
-
-Context -- Operation
-
-interface ProtectionPlugin {
-    ..metadata functions..
-    get_supported_resources_types(): []ResourceType
-    ..graph walk functions..
-    +on_resource_start(context: Context)
-    +on_resource_end(context: Context)
-    ..schema functions..
-    +get_options_schema(resource_type: ResourceType)
-    +get_saved_info_schema(resource_type: ResourceType)
-    +get_restore_schema(resource_type: ResourceType)
-    +get_saved_info(metadata_store: MetadataStore, resource: Resource)
-}
-
-ProtectionPlugin -- Context
-
-interface ProtectionProvider {
-}
-
-class PluggableProtectionProvider extends ProtectionProvider {
-    -plugins: [ResourceType]ProtectionPlugin
-}
-
-PluggableProtectionProvider *-- "1..*" ProtectionPlugin: Uses for functionality
-PluggableProtectionProvider -> ResourceGraphWalker: uses it to iterate over graph
-ResourceGraphWalker - ResourceGraphNode
-
-@enduml
diff --git a/doc/source/specs/pluggable_protection_provider.rst b/doc/source/specs/pluggable_protection_provider.rst
deleted file mode 100644
index 3792f468..00000000
--- a/doc/source/specs/pluggable_protection_provider.rst
+++ /dev/null
@@ -1,256 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-.. role:: red
-.. role:: green
-.. role:: yellow
-.. role:: indigo
-.. role:: purple
-.. role:: black
-
-=============================
-Pluggable Protection Provider
-=============================
-
-https://blueprints.launchpad.net/karbor/+spec/protection-plugin-is-design
-
-Protection Provider
-===================
-
-A Protection Provider is a user-facing, configurable, pluggable entity that
-answers two questions: "how to" and "where to". It does so by composing a
-bank store (responsible for the "where to") with different *Protection
-Plugins* (each responsible for the "how to"). The Protection Provider is
-configurable, both in terms of which bank and protection plugins are composed
-and in their configuration.
-
-Internally, the protection provider contains a map between every registered
-*Protectable* (OpenStack resource type) and a corresponding *Protection
-Plugin*, which is used for operations related to the appropriate resource.
-
-There are 3 resource operations a *Protection Provider* supports, and any
-*Protection Plugin* needs to implement them. These operations usually act on
-numerous resources, and the *Protection Provider* infrastructure is
-responsible for invoking the corresponding *Protection Plugin*
-implementation for each resource. The *Protection Provider* is responsible
-for initiating a DFS traversal of the resource graph, building tasks for each
-of the resources, and linking them with respect to execution order and
-dependency.
-
-#. **Protect**: the protection provider will traverse the selected resources
-   from the resource graph
-#. **Restore**: the protection provider will traverse the resource graph
-   saved in the checkpoint
-#. **Delete**: the protection provider will traverse the resource graph
-   saved in the checkpoint
-
-After the entire graph has been traversed, the Protection Provider will
-return the task flow, which will be queued and then executed according to
-the executor's policy. When all the tasks are done the operation is
-considered complete.
-
-Protection Provider Configuration
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Protection Providers are loaded from configuration files placed in the
-directory specified by the ``provider_config_dir`` configuration option (by
-default: ``/etc/karbor/providers.d``). Each provider configuration file must
-bear the ``.conf`` suffix and contain a ``[provider]`` section. This section
-specifies the following configuration:
-
-#. ``name``: the display name of the protection provider
-#. ``id``: unique identifier
-#. ``description``: textual description
-#. ``bank``: path to the bank plugin
-#. ``plugin``: path to a protection plugin. Should be specified multiple
-   times for multiple protection plugins. Every *Protectable* **must** have
-   a corresponding *Protection Plugin* to support it.
-
-Additionally, the provider configuration file can include other sections
-(besides the ``[provider]`` section), to be used as configuration for each
-bank or protection plugin.
-
-For example::
-
-    [provider]
-    name = Foo
-    id = 2e0c8826-81d6-44f5-bbe5-8f46a98c5845
-    description = Example Protection Provider
-    bank = karbor.protections.karbor-swift-bank-plugin
-    plugin = karbor.protections.karbor-volume-protection-plugin
-    plugin = karbor.protections.karbor-image-protection-plugin
-    plugin = karbor.protections.karbor-server-protection-plugin
-    plugin = karbor.protections.karbor-project-protection-plugin
-
-    [swift_client]
-    bank_swift_auth_url = http://10.0.0.10:5000
-    bank_swift_user = admin
-    bank_swift_key = password
-
-Protection Plugin
-=================
-
-A *Protection Plugin* is a component responsible for the implementation of
-the operations (protect, restore, delete) of one or more *Protectables*
-(i.e. resource types). When writing a *Protection Plugin*, the following
-needs to be defined:
-
-#. Which resources the protection plugin supports
-#. What the schema of parameters for each operation is
-#. What the schema of information the protection plugin stores in a
-   Checkpoint is
-#. The implementation of each operation
-
-Protection Plugin API & Workflow
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A *Protection Plugin* defines how to protect, restore, and delete resources.
-
-When performing an operation, a ProtectionPlugin might need to perform
-actions on a resource before or after some operation is performed on a
-related resource.
-
-For example, before taking a snapshot of a volume you need to quiesce the VM
-and/or run any guest agent operation. Doing it after taking the checkpoint is
-useless.
-
-On the other hand, when copying a volume's data to different sites there is
-no need for other operations to wait on the copy.
-
-Finally, there might be a need to perform an operation marking a transaction
-as successful after everything related to a VM was protected.
-
-Looking at this we see there are 3 distinct phases in every protection:
-
-#. *Preparation Phase*: this phase is for performing actions in relation to
-   a resource's dependencies. It's called the "Preparation Phase" because it
-   is where a plugin should do all the preparation required for the next
-   phase. Operations in this phase should be as short as possible, since
-   they are not parallelized as much as in the following phases. As an
-   example, taking snapshots of all the volumes should happen in relation to
-   the owning VMs and also within a narrow time frame. Copying those
-   snapshots can happen later and is much more parallelizable.
-#. *Main Phase*: this phase is for doing work that has no dependencies or
-   time sensitivity. It will mainly be used for transferring the large
-   amount of information generated in the backup to different sites.
-#. *Completion Phase*: this phase is for performing work once *all* the
-   work, not just preparation, has been completed on a resource and all of
-   its dependencies. This is a good place to attach resources (in case of
-   restore) or close transactions.
-
-As a Protection Plugin developer you want to minimize the work needed in the
-preparation and completion phases and do the bulk of the work in the main
-phase, since this allows for the most efficient execution of the operation.
-
-It's important to note that a developer doesn't have to do any action during
-a phase. It's completely valid to only use the main or preparation phase. In
-fact, we think it's going to be very rare that a Protection Plugin will need
-to use all the phases.
-
-In order to specify the detailed flow of each operation, a *Protection
-Plugin* needs to implement numerous 'hooks'. These hooks differ from one
-another by their time of execution in respect to other hooks, either of the
-same resource or of other resources.
-
-For *each* operation the plugin can implement each of the hooks:
-
-#. **Preparation hooks**: as noted, preparation is for running tasks in
-   relation to other resources in the graph. This is why two hooks exist,
-   one for running before dependent resources' preparation and one for
-   after.
-
-   #. **Prepare begin hook**: invoked before any hook of this resource and
-      dependent resources has begun.
-
-      For tasks that need to happen before any dependent resource's
-      operations begin
-
-      Hook method name: **on_prepare_begin**
-
-   #. **Prepare finish hook**: invoked after any prepare hooks of dependent
-      resources are complete.
-
-      For tasks that finish the work begun in the *prepare begin hook*, or
-      that require the dependent resources' prepare phase to have finished
-
-      Hook method name: **on_prepare_finish**
-
-#. **Main hook**: invoked after the resource's *prepare hooks* are complete.
-
-   For tasks that do heavy lifting and can run in parallel to dependent or
-   depended-upon resources' *main hooks*
-
-   Hook method name: **on_main**
-
-#. **Complete hook**: invoked once the resource's main hook is complete, and
-   the dependent resources' *complete hooks* are complete
-
-   For tasks that require the dependent resources' operations to be
-   complete, and that finalize the operation on the resource.
-
-   Hook method name: **on_complete**
-
-For example: a Protection Plugin for Nova servers might implement a protect
-operation by using the *prepare begin hook* to quiesce the server and/or
-contact a guest agent to complete transactions. A protection plugin for
-Cinder volumes can implement the *prepare finish hook* to take a snapshot of
-the volume. The server's *prepare finish hook* unquiesces the server and/or
-contacts a guest agent. Both the server's and the volume's *main hooks* do
-the heavy lifting of copying the data.
-
-Notes:
-
-* Unimplemented methods are practically no-ops
-* Each such method receives as parameters: ``checkpoint``, ``context``,
-  ``resource``, and ``parameters`` objects
-
-::
-
-    def on_prepare_finish(self, checkpoint, context, resource, parameters):
-        ...
-
-.. figure:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/protection-service/hooks.png
-    :alt: Protection Plugin Hooks
-    :align: center
-
-    Protection Plugin Hooks
-
-    :green:`Green`: Child resource Prepare_begin depends on its parent
-    resource Prepare_begin
-
-    :indigo:`Indigo`: The resource Prepare_finish depends on the resource
-    Prepare_begin
-
-    :purple:`Purple`: Parent resource Prepare_finish depends on the child
-    resource Prepare_finish
-
-    :yellow:`Yellow`: The resource Main depends on the resource
-    Prepare_finish
-
-    :red:`Red`: The resource Complete depends on the resource Main
-
-    :black:`Black`: Parent resource Complete depends on the child resource's
-    Complete
-
-
-
-This scheme decouples the tree structure from the task execution. A plugin
-that handles multiple resources or that aggregates multiple resources into
-one task can use this mechanism to only return tasks when appropriate for
-its scheme.
-
-References
-==========
-1. `Class Diagram Source `_
-2. `Dependency graph building algorithm `_
diff --git a/doc/source/specs/policy-in-code.rst b/doc/source/specs/policy-in-code.rst
deleted file mode 100644
index befae759..00000000
--- a/doc/source/specs/policy-in-code.rst
+++ /dev/null
@@ -1,168 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-==============
-Policy in code
-==============
-
-https://blueprints.launchpad.net/karbor/+spec/policy-in-code
-
-The oslo.policy library now supports handling policies in a way similar to
-how oslo.config handles config options. We can switch our policy handling
-to keep default policy settings in the code, with the policy file only
-necessary for overriding the defaults.
-
-Having default policies in code allows:
-
-#. Simplified deployment and upgrades, since config files that don't modify
-   defaults don't need to change.
-#. Easier maintenance of policy files, since they only contain overridden
-   policies.
-#. A programmatic way to generate policy documentation and samples, similar
-   to how we handle config options.
-
-
-Problem description
-===================
-
-There have been bugs in the past from new features either adding the wrong
-policy settings or forgetting policy settings altogether. For admins it can
-also be difficult to configure policies, because the many policy settings
-that are never changed make locating the desired setting harder.
-
-This would simplify our policy.json file, which would only need to contain
-the entries that differ from the defaults. It would also allow us to
-generate a sample policy file, similar to what we do for karbor.conf,
-showing all the possible settings with the defaults commented out for easy
-reference.
-
-Use Cases
-=========
-
-As a deployer I would like to configure only policies that differ from the
-default.
-
-Proposed change
-===============
-
-Starting with oslo.policy 1.9.0 [0], policies can be declared in the code
-with provided defaults and registered with the policy engine.
-
-Any policy that should be checked in the code will be registered with the
-policy.Enforcer object, similar to how configuration registration is done.
-Any policy check within the code base will be converted to use a new
-policy.Enforcer.authorize method to ensure that all checks are defined. Any
-attempt to use a policy that is not registered will raise an exception.
-
-Registration will require two pieces of data (a registration sketch follows
-the Security impact section below):
-
-1. The rule name, e.g. "plan:get"
-2. The rule, e.g. "rule:admin_or_owner" or "role:admin"
-
-The rule name is needed for later lookups. The rule is necessary in order to
-set the defaults and generate a sample file.
-
-A third, optional description can also be provided, and should be used in
-most cases so it is available in any generated sample policy files.
-
-We can then use this code to add a job that will generate a sample
-policy.json file showing the commented-out defaults directly from the code
-base.
-
-[0] https://github.com/openstack/oslo.policy/blob/1.9.0/doc/source/usage.rst#registering-policy-defaults-in-code
-
-Alternatives
-------------
-
-Stick with our manually configured policy file, which has no checks or full
-reference for possible policy-enforceable settings.
-
-Data model impact
------------------
-
-None.
-
-REST API impact
----------------
-
-None.
-
-Security impact
----------------
-
-None. Although this touches our policy handling, this just enables
-preemptive policy checks and does not change the way we handle policy
-enforcement.
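-A minimal sketch of the registration flow described in the proposed change
-above, following the oslo.policy usage documentation referenced there (the
-rule name and check string are the example values from this spec; the target
-and credentials dicts are placeholders)::
-
-    from oslo_config import cfg
-    from oslo_policy import policy
-
-    enforcer = policy.Enforcer(cfg.CONF)
-
-    # Register the default rule in code; policy.json only overrides it.
-    enforcer.register_defaults([
-        policy.RuleDefault('plan:get', 'rule:admin_or_owner',
-                           description='Show a protection plan.'),
-    ])
-
-    # authorize() raises if 'plan:get' was never registered.
-    # In karbor the target/creds would come from the request context.
-    allowed = enforcer.authorize('plan:get',
-                                 {'project_id': 'demo'},
-                                 {'roles': ['admin'], 'project_id': 'demo'},
-                                 do_raise=False)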
-
-Notifications impact
---------------------
-
-None.
-
-Other end user impact
----------------------
-
-None.
-
-Performance Impact
-------------------
-
-There will be slightly more work done at service startup time as policies
-are registered, which should be a very small impact. Policy checking at run
-time may become slightly faster due to having a smaller policy file to read
-before each check.
-
-Other deployer impact
----------------------
-
-End user admins will no longer need to have all settings defined in the
-policy.json file, only those that they want different from the defaults.
-
-Developer impact
-----------------
-
-Any policies added to the code should be registered before they are used.
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-    chenying
-
-Work Items
-----------
-
-* Define and register all policies.
-* Switch policy checks to use new method.
-* Update Devstack to not expect policy.json file.
-* Update deployer documentation.
-* Update tox genconfig target to also generate sample policy file.
-
-Dependencies
-============
-
-Current requirements are for oslo.policy >= 1.9.0, so no extra dependencies
-are required.
-
-Testing
-=======
-
-If done correctly, no additional or different testing should be required.
-Existing tests should detect if there are any changes in the expected policy
-behavior.
-
-Documentation Impact
-====================
-
-Documentation should be updated to state that only policies which are
-changed from the default policy will be needed when configuring policy
-settings.
-
-Updates will also be made to our devref documentation describing the process
-for generating the sample policy file.
-
-References
-==========
-
-None.
diff --git a/doc/source/specs/protection-service/activities-links.svg b/doc/source/specs/protection-service/activities-links.svg
deleted file mode 100644
index 85c131fe..00000000
--- a/doc/source/specs/protection-service/activities-links.svg
+++ /dev/null
@@ -1,274 +0,0 @@
-(SVG drawing data omitted)
diff --git a/doc/source/specs/protection-service/class-diagram.pu b/doc/source/specs/protection-service/class-diagram.pu
deleted file mode 100644
index e00a1fd8..00000000
--- a/doc/source/specs/protection-service/class-diagram.pu
+++ /dev/null
@@ -1,173 +0,0 @@
-@startuml
-
-title ProtectionService Class Diagram
-
-class RpcServer {
-    -endpoints: []Manager_Class
-    -target: messaging.Target
-}
-
-class ProtectionManager {
-    +<<RPC>>execute_operation(backup_plan:BackupPlan, action:Action)
-    +<<RPC>>list_providers(list_options:{}): []Providers
-    +<<RPC>>show_provider(provider_id:String):Provider
-    +<<RPC>>list_checkpoints(list_options:{}): []Checkpoint
-    +<<RPC>>show_checkpoint(provider_id:String, checkpoint_id:String): Checkpoint
-    +<<RPC>>delete_checkpoint(provider_id:String, checkpoint_id:String):void
-    -protect_operation(backup_plan:BackupPlan):void
-    -restore_operation(backup_plan:BackupPlan):void
-    -workFlowEngine:WorkFlowEngine
-}
-
-RpcServer*-right->ProtectionManager:has many as endpoints
-
-class WorkFlowEngine {
-    +build_task_flow(backup_plan:BackupPlan, provider:ProtectionProvider):flow:taskflow.flow.Flow
-    +execute(executor_type:String, flow:taskflow.flow.Flow):void
-}
-
-ProtectionManager*->WorkFlowEngine:has one
-
-class taskflow.engines.action_engine.engine.ParallelActionEngine {
-    ...
-}
-
-WorkFlowEngine*-up->taskflow.engines.action_engine.engine.ParallelActionEngine:load one
-
-class taskflow.patterns.graph_flow.Flow {
-    ...
-}
-
-WorkFlowEngine -right-> taskflow.patterns.graph_flow.Flow:generate one per operation execution
-
-class taskflow.task.Task {
-    ...
-}
-
-taskflow.patterns.graph_flow.Flow->taskflow.task.Task:composed by many
-
-class CreateCheckpointTask extends taskflow.task.Task {
-    -backup_plan:BackupPlan
-    -checkpoint_collection:CheckpointCollection
-    +execute(): void
-    +revert(): void
-}
-
-class SyncCheckpointStatusTask extends taskflow.task.Task{
-    -checkpoint:Checkpoint
-    -checkpoint_collection:CheckpointCollection
-    +execute(): void
-    +revert():void
-}
-
-interface CheckpointCollectionInterface {
-    +list(list_options:dict): []Checkpoints
-    +show(checkpoint_id:String): Checkpoint
-    +delete(checkpoint_id:String):void
-    +create(plan:ProtectionPlan): Checkpoint
-    +update(checkpoint:Checkpoint, kwargs:{}): void
-}
-
-class CheckpointCollection implements CheckpointCollectionInterface{
-    -bank_plugin:BankPluginInterface
-    +init(bank_plugin:BankPlugin):void
-    ..checkpoint functions..
-    ...
-}
-
-CheckpointCollection*-down->BankPlugin:has one
-
-class ProviderRegistry{
-    +list_providers(list_options:{}):[]ProtectionProvider
-    +show_provider(provider_id:String):ProtectionProvider
-    -load_providers(cfg_file:String):void
-}
-
-ProtectionManager*-down->ProviderRegistry:has a
-
-interface ProtectionProvider {
-    +build_task_flow(backup_plan:BackupPlan, action:Action):taskflow.patterns.graph_flow.Flow
-}
-
-ProviderRegistry "1"*-down->"many" ProtectionProvider:manage
-
-ProtectionProvider-right->taskflow.patterns.graph_flow.Flow:generates
-
-taskflow.patterns.graph_flow.Flow->taskflow.patterns.graph_flow.Flow:composed by
-
-class PluggableProtectionProvider implements ProtectionProvider{
-    +get_protection_plugin(protectable_type:ProtectableType):ProtectionPluginInterface
-    +get_bank_plugin():BankPlugin
-    +get_checkpoint_collection():CheckpointCollection
-    +build_task_flow(backup_plan:BackupPlan): taskflow.patterns.graph_flow.Flow
-    -load_plugin(cfg_file:String):void
-    -bank_plugin:BankPluginInterface
-    -plugins:{ProtectableType:ProtectionPluginInterface}
-    -checkpoint_collection:CheckpointCollection
-}
-
-interface ProtectionPluginInterface {
-    ..getter functions..
-    +get_supported_resources_types(): []ResourceType
-    ..protect action functions..
-    +get_protection_status(protection_id:String):Enum
-    ..graph walk functions..
-    +on_resource_start(context: Context)
-    +on_resource_end(context: Context)
-    ..schema functions..
-    +get_options_schema(resource_type: ResourceType)
-    +get_saved_info_schema(resource_type: ResourceType)
-    +get_restore_schema(resource_type: ResourceType)
-    +get_saved_info(resource: Resource)
-}
-
-class ProtectionPlugin implements ProtectionPluginInterface {
-    -protectable_type:String
-    -schema:[]String
-    ..getter functions..
-    ...
-    ..protect action functions..
-    ...
-    ..graph walk functions..
-    ...
-    ..schema functions..
-    ...
-}
-
-PluggableProtectionProvider "1" *-left->"many" ProtectionPlugin:aggregates
-
-interface BankPluginInterface {
-    +chroot(context:dict):void
-    +create_object(key:String, value:Object):void
-    +update_object(key:String, options:dict, value:Object):void
-    +show_object(key:String):dict
-    +get_object(key:String):dict
-    +delete_object(key:String):void
-    +list_objects(options:dict):void
-    +acquire_lease(): void
-    +renew_lease(): void
-    +check_lease_validity():bool
-}
-
-class BankPlugin implements BankPluginInterface {
-    -storage_url:URL
-    -context:dict
-    -owner_id:String
-    -expired_time:Long
-    -renew_time:Long
-    ..object functions..
-    ...
-    ..lease functions..
-    ...
-}
-
-PluggableProtectionProvider "1" *-down->"1" CheckpointCollection:has a
-
-class ProtectionData {
-    +protection_id:String
-    +protection_target:RestoreTarget
-    +status:Enum
-}
-
-ProtectionPlugin -up->ProtectionData: create one
-@enduml
\ No newline at end of file
diff --git a/doc/source/specs/protection-service/class_diagram.pu b/doc/source/specs/protection-service/class_diagram.pu
deleted file mode 100644
index 5377b8e7..00000000
--- a/doc/source/specs/protection-service/class_diagram.pu
+++ /dev/null
@@ -1,85 +0,0 @@
-@startuml
-
-title Restore Class Diagram
-
-interface ProtectionPlugin {
-    ..metadata functions..
-    get_supported_resources_types(): []ResourceType
-    ..graph walk functions..
-    +on_resource_start(context: Context)
-    +on_resource_end(context: Context)
-    ..schema functions..
-    +get_options_schema(resource_type: ResourceType)
-    +get_saved_info_schema(resource_type: ResourceType)
-    +get_restore_schema(resource_type: ResourceType)
-    +get_saved_info(metadata_store: MetadataStore, resource: Resource)
-}
-
-class BaseProtectionPlugin implements ProtectionPlugin {
-    ..graph walk functions..
-    +on_resource_start(context: Context)
-    +on_resource_end(context: Context)
-    ..Protection functions..
-    +create_backup(self, cntxt:Context, checkpoint:CheckPoint, **kwargs)
-    +restore_backup(self, cntxt:Context, checkpoint:CheckPoint, **kwargs)
-    +delete_backup(self, cntxt:Context, checkpoint:CheckPoint, **kwargs)
-    +resume(self, cntxt:Context, checkpoint:CheckPoint, **kwargs)
-    +promote(self, cntxt:Context, checkpoint:CheckPoint, **kwargs)
-}
-
-class HeatResource {
-    ..properties..
-    -resource_id:String
-    -type:enum
-    -properties:dict
-    -metadata:dict
-    ..init function..
-    +_init(resource_id:String, type:enum):void
-    ..property setters..
-    +set_property(key:String, value:Object):void
-    +set_metadata(key:String, value:Object):void
-    ..print functions..
-    +toDict():dict
-}
-
-class HeatParameter {
-    ..properties..
-    -value:Object
-    ..init function..
-    +_init(value:Object):void
-    ..getters..
-    +get_value():Object
-}
-
-class HeatTemplate {
-    ..properties..
-    +heat_template_version:String
-    +description:String
-    -resources:[]HeatResource
-    -original_id_resource_map:{String:HeatResource}
-    -original_id_parameter_map:{String:HeatParameter}
-    ..functions..
-    put_parameter(original_id:String, parameter:HeatParameter):void
-    put_resource(original_id:String, resource:HeatResource):void
-    get_resource_reference(original_id:String):Object
-    ..printer function..
- toDict():dict
- dumpToYamlFile(file:File):String
-}
-
-class Task {
-+execute(kwargs)
-}
-
-Task "N" -down-> "1" HeatTemplate: share as input
-
-Task "1"-left-> "N" HeatResource: create
-
-Task "1" -right-> "N" HeatParameter: create
-
-HeatTemplate "1" *-up-> "N" HeatResource: aggregates
-
-HeatTemplate "1" *-up-> "N" HeatParameter: aggregates
-
-BaseProtectionPlugin "1" --> "N" Task: create
-@enduml
\ No newline at end of file
diff --git a/doc/source/specs/protection-service/protect-rpc-call-seq-diagram.pu b/doc/source/specs/protection-service/protect-rpc-call-seq-diagram.pu
deleted file mode 100644
index dc401bed..00000000
--- a/doc/source/specs/protection-service/protect-rpc-call-seq-diagram.pu
+++ /dev/null
@@ -1,24 +0,0 @@
-@startuml
-
-title create_checkpoint - API RPC call Sequence Diagram
-
-Karbor_API_Service ->> ProtectionManager :create_checkpoint(backup_plan, protect)
-ProtectionManager -> WorkflowEngine : build task flow
-ProtectionManager -> WorkflowEngine : execute task flow
-WorkflowEngine -> CreateCheckpointTask:execute()
-CreateCheckpointTask -> Checkpoints : create_checkpoint()
-Checkpoints -\ BankPlugin : check_lease_validity(owner_id)
-Checkpoints -\ BankPlugin : put(checkpoint_key, value)
-Checkpoints -\ BankPlugin : build indexes, put(index_key, value)
-WorkflowEngine -> ResourceProtectTask:execute()
-ResourceProtectTask -\ ProtectionPlugin : protect(protectable)
-ResourceProtectTask -> Checkpoints : create_protection_definition(checkpoint, protectable)
-Checkpoints -\ BankPlugin : put(protection_definition_key, value)
-WorkflowEngine -> SyncCheckpointStatusTask :execute()
-SyncCheckpointStatusTask -\ ProtectionPlugin : get_protection_status(protectable)
-ProtectionPlugin --\ SyncCheckpointStatusTask : return protection status
-SyncCheckpointStatusTask -> Checkpoints : update_protection_definition(checkpoint, protectable, {'status':finished})
-Checkpoints -\ BankPlugin : put(protection_definition_key, value)
-SyncCheckpointStatusTask -> Checkpoints : update_checkpoint(checkpoint, {'status':finished})
-Checkpoints -\ BankPlugin : put(checkpoint_key, updated_value)
-@enduml
\ No newline at end of file
diff --git a/doc/source/specs/protection-service/protection-service.rst b/doc/source/specs/protection-service/protection-service.rst
deleted file mode 100644
index 8b9c4307..00000000
--- a/doc/source/specs/protection-service/protection-service.rst
+++ /dev/null
@@ -1,150 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-=========================
-Protection Service Basics
-=========================
-
-https://bugs.launchpad.net/karbor/+bug/1529199
-
-Protection Service is a component of karbor (an openstack project providing
-data protection as a service), and is responsible for executing
-protect/restore/other actions on operations (triggered plans).
-
-Architecturally, it acts as an RPC server for the karbor API service,
-actually executing the actions on triggered operations.
-
-It is also the component that cooperates with the protection plugins supplied
-by providers: it loads providers (each composed of a series of plugins) and
-manages them.
-
-Internally, the protection service constructs a workflow for each operation
-action execution; the tasks in the workflow are linked into a graph by
-resource dependency, and are executed in parallel or serially according to
-the graph flow.
-
-RPC interfaces
-==============
-
-.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/protection-service/protection-architecture.png
-
-As the module graph shows, the protection service basically provides the
-following RPC calls:
-
-Operation RPC:
---------------
-**execute_operation(backup_plan:BackupPlan, action:Action):** where action
-could be protect or restore
-
-Provider RPC:
--------------
-**list_providers(list_options:dict): []Providers**
-
-**show_provider(provider_id:String):Provider**
-
-Checkpoint RPC:
----------------
-
-**list_checkpoints(list_options:{}): []Checkpoints**
-
-**show_checkpoint(provider_id:String, checkpoint_id:String): Checkpoint**
-
-**delete_checkpoint(provider_id:String, checkpoint_id:String):void**
-
-Main Concept
-============
-.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/protection-service/class-diagram.png
-
-
-Protection Manager
-------------------
-Endpoint of the RPC server, which handles Operation RPC calls and dispatches
-the other RPC calls to the corresponding components.
-
-It produces a graph workflow for each operation execution, and has the
-workflow executed by its workflow engine.
-
-ProviderRegistry
-----------------
-
-Entity to manage multiple providers; it loads provider definitions from
-config files on init and maintains them in an in-memory map.
-
-It actually handles the RPC calls related to provider management, such as
-list_providers() and show_provider().
-
-CheckpointCollection
---------------------
-
-Entity to manage checkpoints, which provides CRUD interfaces for handling
-checkpoints. As a checkpoint is a karbor-internal entity, one checkpoint
-operation is actually composed of several atomic BankPlugin operations.
-
-Take create_checkpoint as an example: it first acquires a write lease (there
-will be a detailed **lease** design doc) to avoid conflicts with GC deletion,
-then creates the key/value for the checkpoint itself. After that, it builds
-multiple indexes to make listing checkpoints easier.
-
-Typical scenario
-================
-A typical scenario starts with a triggered operation being sent through an
-RPC call to the Protection Service.
-
-Let's take the protect action as the example and analyze the sequence
-together with the class graph:
-
-.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/protection-service/protect-rpc-call-seq-diagram.png
-
-1. Karbor **Operation Engine**
-------------------------------
-which is responsible for triggering operations according to time schedules or
-events, calls the Protection Service RPC:
-execute_operation(backup_plan:BackupPlan, action:Action);
-
-2. ProtectionManager
---------------------
-which serves as one of the RPC server endpoints, handles this RPC call with
-the following sequence:
-
-2.1 CreateCheckpointTask:
-^^^^^^^^^^^^^^^^^^^^^^^^^
-This task is the start point of the graph flow. It calls the unique instance
-of class **Checkpoints**:create_checkpoint(plan:ProtectionPlan), to create
-one checkpoint that persists the status of the action execution.
-
-The instance of **Checkpoints** retrieves the **Provider** from the input
-parameter **BackupPlan**, and gets the unique instance of **BankPlugin**.
-
-While **BankPlugin** provides interfaces for CRUD of key/values in the
-**Bank** plus lease interfaces to avoid write/delete conflicts,
-**Checkpoints** is responsible for the whole procedure of creating a
-checkpoint, including granting the lease, creating the key/value of the
-checkpoint, building indexes etc., through composing calls to **BankPlugin**.
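-
-A minimal Python sketch of that composition, written against the
-BankPluginInterface methods from the class diagram above
-(check_lease_validity, create_object); the key layout and the checkpoint
-fields are illustrative assumptions, not the final implementation:
-
-.. code-block:: python
-
-    import uuid
-
-    class CheckpointCollection(object):
-        def __init__(self, bank_plugin):
-            self.bank_plugin = bank_plugin
-
-        def create(self, plan):
-            # Refuse to write if our lease may have expired, so that a GC
-            # deletion cannot race with this write (see the lease doc).
-            if not self.bank_plugin.check_lease_validity():
-                raise RuntimeError('bank lease expired')
-
-            checkpoint_id = uuid.uuid4().hex
-            # 1. Persist the checkpoint object itself.
-            self.bank_plugin.create_object(
-                '/checkpoints/%s/metadata' % checkpoint_id,
-                {'id': checkpoint_id, 'plan': plan.id,
-                 'status': 'protecting'})
-            # 2. Build a secondary index so listing checkpoints stays cheap.
-            self.bank_plugin.create_object(
-                '/indexes/by-plan/%s/%s' % (plan.id, checkpoint_id), '')
-            return checkpoint_id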
-
-2.2 Call ProtectionProvider to build the resource flow
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-This task flow is built by walking through the **resource tree** (see the
-**Pluggable protection provider** doc), which returns a graph flow. The
-resulting graph flow is composed of tasks representing the activities of the
-ProtectionPlugin for each resource, with links between the tasks according
-to the activity types and the resource dependencies.
-
-The graph flow returned by the ProtectionProvider is added to the top layer
-task flow, right behind the start point task **CreateCheckpointTask**, and
-is executed with the parallel engine.
-
-The protection plugin is responsible for storing the ProtectionData (backup
-id, snapshot id, image id, etc.) into the Bank under the corresponding
-**ProtectionDefinition**.
-
-2.3 CompleteCheckpointTask
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-This task is added to the top layer task flow right after the task flow built
-from the ProtectionProvider, and is executed only when all tasks ahead of it
-have completed successfully. It updates the checkpoint status to available
-and commits it to the bank.
diff --git a/doc/source/specs/protection-service/restore-design-spec.rst b/doc/source/specs/protection-service/restore-design-spec.rst
deleted file mode 100644
index 53ac1e00..00000000
--- a/doc/source/specs/protection-service/restore-design-spec.rst
+++ /dev/null
@@ -1,227 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-==============================================
-Restore design spec (protection service level)
-==============================================
-
-https://bugs.launchpad.net/karbor/+bug/1560826
-
-Protection Service is a component of karbor (an openstack project providing
-data protection as a service), and is responsible for executing
-protect/restore/other actions on operations (triggered plans). The restore
-functionality of the protection service covers four aspects:
-
-1. Restore from what point of data.
-
-2. What those resources will look like after restoration.
-
-3. In which way we organize the restoration work.
-
-4. How to watch the restoration procedure.
-
-The most important assumption we hold here is that the bank of karbor, which
-holds our protection data, is highly available and reliable.
-
-Restore from what point
-=======================
-
-In `document protection service design `_
-, we have described the procedure used to protect a resource: for each
-protection plan execution, we persist a checkpoint in the bank.
-
-If the checkpoint is in status available, it is qualified to be a foundation
-on which we can build our restoration.
-
-A checkpoint includes the following data:
-
-Plan:
------
-This item is the plan that was executed and thus produced this checkpoint.
-
-Resource dependency graph:
---------------------------
-The resource dependency graph describes the resource stack set in the plan,
-and the dependencies among those resources and their sub resources.
-
-This resource dependency graph lets us check the resource dependencies in
-retrospect.
-
-This view is critical, since the dependencies may vary over time; e.g., a
-volume could be attached to or detached from a server at different times.
-However, what we aim to rebuild is a resource stack with the same or similar
-dependencies as the original resource stack at the time point of protection.
-
-Resource definition data:
--------------------------
-Resource definition data is the data defined and persisted by each protection
-plugin; a protection plugin can persist the metadata of the protection
-resource (say, a backup id), of the original resource, or even the data to be
-backed up/replicated.
-
-This resource definition data can be retrieved during restoration, and can
-serve as parameters to rebuild our resource stack.
-
-Restore to what
-===============
-The target of restoration is to rebuild the resource stack which is
-explicitly set or implied in the protection plan.
-
-It means that the resource stack to be protected and rebuilt includes not
-only the target resources explicitly set in the protection plan, but also
-those resources which the target resources depend on.
-
-The karbor protection service calls the protection plugins to build the
-resource stack in the order of the dependencies described by the resource
-graph (persisted in the checkpoint, as mentioned above).
-
-However, for each kind of resource, deciding what stays unchanged and what
-changes is not the responsibility of the karbor protection service; each
-protection plugin implementation is free to define its own rules. Say, one
-server protection plugin may require the fixed ip to stay unchanged after
-restoration, and another may require the attachment device path of a volume
-to stay the same, etc. Those requirements can be met in the implementation of
-the concrete server protection plugin.
-
-The procedure of building an openstack resource stack is aligned with the
-openstack heat service. To avoid repeating development work, for now, karbor
-generates a heat template (HOT) as the intermediate restore target. The
-karbor restore API enables the user to specify a file path to export the heat
-template; the karbor protection service generates the heat template according
-to the protection data and exports it to the specified file path.
-
-How to restore
-==============
-Based on our BaseProtectionPlugin, a protection plugin implementation with a
-single task doesn't need to care about task flow building; it only needs to
-implement the restore() function.
-
-options to implement ProtectionPlugin restore()
------------------------------------------------
-
-Basically, the standard protection plugin restore generates heat resources in
-memory, but we also tolerate backup protection plugins which don't rely on
-the standard openstack API to create resources. In that case, the restore
-function may produce resources directly instead of through heat.
-
-.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/protection-service/class_diagram.png
-
-Generally, each restore task shares an injected parameter: an instance of the
-HeatTemplate class. It's created per restore request, and manages the
-in-memory heat template, which aggregates the in-memory HeatParameter
-instances and in-memory HeatResource instances produced by each restore task.
-
-To tolerate protection plugins not based on the standard openstack API, there
-are two options to implement the restore() function:
-
-**1. Restore() to build in memory HeatParameter instance(s):**
-
-The restore() function directly builds the corresponding resource and waits
-synchronously until it becomes available. It then encapsulates the built
-resource into a HeatParameter object and calls
-heatTemplate.put_parameter(original_id, heatParameter) to publish it for its
-parent task's reference. The original_id is the resource id of the protected
-resource, through which the parent task can refer to it.
-
-**2. Restore() to build in memory HeatResource instance(s):**
-
-The restore() function doesn't build the resource directly, but only
-encapsulates an in-memory HeatResource object with the protected data as
-parameters, possibly referring to its children HeatResource/HeatParameter
-instances. Same as option 1, it calls
-heatTemplate.put_resource(original_id, heatResource) to publish it for its
-parent task's reference. The original_id is the resource id of the protected
-resource, through which the parent task can refer to it.
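-
-To make option 2 concrete, here is a minimal Python sketch of a hypothetical
-server plugin's restore(), written against the HeatTemplate/HeatResource
-classes from the class diagram; the saved_info keys and property names are
-illustrative assumptions:
-
-.. code-block:: python
-
-    def restore(self, context, original_id, saved_info, heat_template):
-        # Option 2: describe the server instead of creating it directly.
-        server = HeatResource(original_id, 'OS::Nova::Server')
-        server.set_property('flavor', saved_info['flavor'])
-        server.set_property('image', saved_info['image'])
-
-        # The parent takes care of attachments: refer to the child volume
-        # by its *original* (protected) id. The reference may resolve to a
-        # plain volume id (option 1) or to {'get_resource': ...} (option 2),
-        # depending on how the child plugin restored it.
-        volume_ref = heat_template.get_resource_reference(
-            saved_info['volume_original_id'])
-        server.set_property('block_device_mapping_v2',
-                            [{'volume_id': volume_ref}])
-
-        heat_template.put_resource(original_id, server)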
-
-Note that a **composite resource protection plugin**, say, the Network
-protection plugin, is represented as a single resource node in the resource
-graph but actually builds multiple resources inside its restore() call. It is
-required to generate multiple HeatResource/HeatParameter instances in memory
-and put them into the shared input HeatTemplate instance.
-
-How to handle resource references between restore tasks:
---------------------------------------------------------
-
-**1. Parent node takes care of attachment**
-
-As the resource graph is generated during protection, the parent node should
-take care of the attachment of its children resources. Say, it's the server
-protection plugin's job to create the attachment resources that attach the
-volumes.
-
-**2. Task flow engine ensures ordering of reference**
-
-Our in-memory HeatResource/HeatParameter instances are built based on the
-resource graph, so even with parallel task execution, the task flow engine
-guarantees that children tasks are executed first. Thus the children
-HeatResource/HeatParameter instances are put into the internal collection
-before the HeatResource/HeatParameter instances produced by the parent task.
-
-**3. Refer to a child resource by original resource id**
-
-To implement the restore() function, each resource needs to refer to its
-newly built children resources, either by get_param or by get_resource. As
-each HeatParameter and HeatResource instance is put into the HeatTemplate
-instance indexed by the original id (the protected resource id), a parent
-task can refer to its children HeatParameter/HeatResource through the
-original resource id: by calling
-HeatTemplate.get_resource_reference(original_id:String), which returns the
-reference object, either a resource_id (String) or a dict
-({get_resource: resource_id}). Note that we give up the standard
-requires/provides mechanism to pass input/output among tasks, since for a
-composite resource like Network, the HeatParameter/HeatResource it produces
-does not correspond to the resource node it represents.
-
-**4. Limitation of child resource reference**
-
-If the parent resource protection plugin adopts option 1 to rebuild its
-resource, and its child resource protection plugin follows option 2, one
-limitation is that the parent plugin may have no way to refer to its child
-resource, since the child resource won't get created during the lifetime of
-the task. Considering this limitation, a protection plugin with an option 1
-implementation can choose to extend the heat resource to include its own
-resource building logic.
-
-work flow of restoration:
--------------------------
-.. image:: https://raw.githubusercontent.com/openstack/karbor/master/doc/images/protection-service/restore-processing-sequence-flow.png
-
-
-1. User calls the API to specify a restore from one checkpoint, plus other
-restore params (export heat template file path, external network, etc.).
-
-2. In the protection service, we retrieve the resource graph from the
-checkpoint;
-
-3. Walk through the resource graph and thus build the restoration task flow;
-
-4. Execute the restoration task flow, which dumps the HeatTemplate with
-pyyaml to a temporary file. The file object is the output of the task graph;
-
-5. The protection service constructs a task dependent on the task graph of
-step 3, which is executed taking the heat template as input. It calls the
-heat client to execute this template.
-
-6. There could be another task to track the restoration status as well.
-
-How to restore between two unsymmetrical openstack sites (TBD)
-===============================================================
-Unsymmetrical cases include unsymmetrical physical networks, vlan to vxlan,
-different server flavors, different volume types, etc.
-
-The basic idea is that the protection plugin is free to generate the template
-according to the target site status. It can check the target site status
-through the openstack API or a config file, and karbor can define some rules
-to adapt one world to another.
-
-Restore heat stack managed resources (TBD)
-===========================================
-The basic idea here is to iterate the original source template, look up the
-corresponding resources in the protection checkpoint, and thus rebuild the
-source template with the checkpoint data. In this way, the rebuilt resources
-are still managed by the heat stack.
-
-How to watch restoration procedure (TBD)
-=========================================
-The basic idea is to watch the corresponding heat stack.
diff --git a/doc/source/specs/quotas.rst b/doc/source/specs/quotas.rst
deleted file mode 100644
index d47b4c04..00000000
--- a/doc/source/specs/quotas.rst
+++ /dev/null
@@ -1,300 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-====================
-Add Quotas to Karbor
-====================
-
-https://blueprints.launchpad.net/karbor/+spec/support-quotas-in-karbor
-
-Problem description
-===================
-
-To prevent system capacities from being exhausted without notification, users
-can set up quotas. Quotas are operational limits. For example, the number of
-gigabytes allowed for each project can be controlled so that cloud resources
-are optimized in the Block Storage service (Cinder) [1]. Quotas can be
-enforced at the project level. Nova uses a quota system for setting limits on
-resources such as the number of instances or the amount of CPU that a
-specific project or user can use. [2]
-
-A quota system will be introduced to Karbor for setting limits on resources
-such as the number of gigabytes of backup data that a specific project can
-use.
-
-Use Cases
-=========
-
-Users can set limits on resources such as the number of gigabytes of backup
-data via a new quotas RESTful API.
-
-
-Proposed change
-===============
-1. Two data models about quotas will be introduced to karbor.
-
-   The data model 'quotas' is used for saving the hard limit number of the
-   resources that a specific project can use.
-   The data model 'quota_usages' is used for saving the in-use number and the
-   reserved number of the resources that belong to a specific project.
-
-
-2. Add the quotas API controller for the Karbor API.
-
-   Implement the 'update' method of the quotas API controller.
-   Implement the 'show' method of the quotas API controller.
-   Implement the 'index' method of the quotas API controller.
-
-3. The resources that need limits.
-
-   QUOTAS_PLAN_CAPACITY = 'quota_plans'
-
-   QUOTAS_PLAN_CAPACITY: The maximum number of plans.
-
-4. Init the default limit number of the resources in the quotas data model.
-
-   The default limit number of the resources should be initialized in the
-   data model table 'quotas' when the karbor api service starts to run.
-   The default limit number of the resources can be set from the value of
-   configuration options. The config option 'quota_plans' needs to be added.
-
-
-5. Update the reserved and in-use numbers of the resource quota usages (see
-   the sketch below).
-
-   When a resource is requested, the specific quota for this resource is
-   checked first. If this check passes, the reserved number of this resource
-   in the data model 'quota_usages' is updated first, according to the
-   requested resource number. If the resource is created successfully, the
-   in-use number of the resource quota is updated, and at the same time the
-   reserved number of this resource is subtracted.
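-
-A minimal Python sketch of that reserve/commit/rollback cycle, modeled on the
-pattern Cinder and Nova use; the `quotas` helper and its method names are
-assumptions for illustration, not the final karbor API:
-
-.. code-block:: python
-
-    def create_plan(context, plan_values):
-        # Check the hard limit and bump 'reserved' atomically.
-        reservations = quotas.reserve(context, plans=1)
-        try:
-            plan = db.plan_create(context, plan_values)
-        except Exception:
-            # Creation failed: give the reserved amount back.
-            quotas.rollback(context, reservations)
-            raise
-        # Creation succeeded: move the quantity from 'reserved' to
-        # 'in_use' in quota_usages.
-        quotas.commit(context, reservations)
-        return plan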
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-1. quotas
-
-+-------------------------+--------------+------+-----+---------+-------+
-| Field                   | Type         | Null | Key | Default | Extra |
-+-------------------------+--------------+------+-----+---------+-------+
-| id                      | varchar(36)  | NO   | PRI | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| project_id              | varchar(255) | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| resource                | varchar(255) | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| hard_limit              | Integer      | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| created_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| updated_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| deleted_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-
-2. quota_usages
-
-+-------------------------+--------------+------+-----+---------+-------+
-| Field                   | Type         | Null | Key | Default | Extra |
-+-------------------------+--------------+------+-----+---------+-------+
-| id                      | varchar(36)  | NO   | PRI | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| project_id              | varchar(255) | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| resource                | varchar(255) | NO   |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| in_use                  | Integer      | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| reserved                | Integer      | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| created_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| updated_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-| deleted_at              | Datetime     | YES  |     | NULL    |       |
-+-------------------------+--------------+------+-----+---------+-------+
-
-REST API impact
----------------
-
-1. Update quotas API, admin only.
-The request JSON when updating a quota::
-
-    **post** : /v1/{project_id}/quotas/{user_project_id}
-    ```json
-    {
-        "quota":
-        {
-            "plans": 100
-        }
-    }
-
-
-The response JSON when updating a quota::
-
-    ```json
-    {
-        "quota": {
-            "plans": 100
-        }
-    }
-
-
-
-2. Show quota API. Admins can query another project's quota.
-The response JSON when showing a quota::
-
-    **get** : /v1/{project_id}/quotas/{user_project_id}
-    ```json
-    {
-        "quota": {
-            "plans": 100,
-            "id": "73f74f90a1754bd7ad658afb3272323f"
-        }
-    }
-
-
-3. Delete quota API. Admin only.
-The response JSON when deleting a quota::
-
-    **delete** : /v1/{project_id}/quotas/{user_project_id}
-
-4. Show quota detail API. Admins can query another project's quota.
-The response JSON when showing quota details::
-
-    **get** : /v1/{project_id}/quotas/{user_project_id}/detail
-    ```json
-    {
-        "quota": {
-            "plans": {
-                "reserved": 0,
-                "limit": 100,
-                "in_use": 1
-            },
-            "id": "73f74f90a1754bd7ad658afb3272323f"
-        }
-    }
-
-5. Show default quota API. Admins can query another project's default quota.
-The response JSON when showing a default quota::
-
-    **get** : /v1/{project_id}/quotas/{user_project_id}/defaults
-    ```json
-    {
-        "quota": {
-            "plans": 50,
-            "id": "73f74f90a1754bd7ad658afb3272323f"
-        }
-    }
-
-
-6. Update quota class API, admin only.
-The request JSON when updating a quota class::
-
-    **post** : /v1/{project_id}/quota_classes/{class_name}
-    ```json
-    {
-        "quota_class": {
-            "plans": 120
-        }
-    }
-
-
-The response JSON when updating a quota class::
-
-    ```json
-    {
-        "quota_class": {
-            "plans": 120
-        }
-    }
-
-
-7. Show quota class API.
-The response JSON when showing a quota class::
-
-    **get** : /v1/{project_id}/quota_classes/{class_name}
-    ```json
-    {
-        "quota_class": {
-            "plans": 120,
-            "id": "default"
-        }
-    }
-
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Add a new RESTful API about quotas
-* Add the database data models of quotas
-* Add the quotas API to the karbor client
-
-Dependencies
-============
-
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-None
-
-References
-==========
-
-[1] https://docs.openstack.org/horizon/latest/admin/set-quotas.html
-
-[2] https://docs.openstack.org/nova/latest/user/quotas.html
-
diff --git a/doc/source/specs/refactor_clients.rst b/doc/source/specs/refactor_clients.rst
deleted file mode 100644
index 01e20e04..00000000
--- a/doc/source/specs/refactor_clients.rst
+++ /dev/null
@@ -1,147 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-============================================
-Refactor the clients used in protect service
-============================================
-
-https://blueprints.launchpad.net/karbor/+spec/refactor-clients
-
-Problem description
-===================
-
-As bug [1] says, the user token may expire during the protection process when
-it is used directly to access other openstack services.
-
-Use Cases
-=========
-
-In the protection service, both protect and restore operations use the user
-token passed in the context to create clients for other openstack services,
-and then access those services through these clients.
-Accessing other services may fail because the user token has expired.
-
-Proposed change
-===============
-
-Recently, Keystone merged a new spec [2] that resolves the token expiration
-issue for calls between multiple openstack services. Simply put, the
-principle is this: when the Keystone middleware validates the user token, it
-checks the service token first. If a service token is passed and valid,
-Keystone allows the user token to be fetched even if it has expired, unless
-the elapsed time exceeds the max window time set in Keystone (the default
-value is 48 hours).
-
-This fixes Karbor's issue perfectly. According to that spec, Karbor can
-access other openstack services successfully for 48 hours, which is enough
-to finish all the protect/restore work. Some changes in how the clients of
-other services are created and used are needed before that new mechanism can
-be adopted.
-
-1. create client
-   The client may be created like this
-
-.. code-block:: python
-
-    from cinderclient import client as cinderclient
-    from keystoneauth1 import service_token
-    from keystoneauth1 import session as ks_session
-
-    def create(context):
-        # user_auth_plugin: created from the context, which stores the
-        # user token.
-        # service_auth_plugin: created and initialized from the service
-        # credentials of Karbor which are registered in Keystone.
-        auth_plugin = service_token.ServiceTokenAuthWrapper(
-            user_auth_plugin, service_auth_plugin)
-
-        sess = ks_session.Session(auth=auth_plugin, verify=verify)
-
-        # endpoint: the public url of cinder
-        client = cinderclient.Client('3', session=sess,
-                                     endpoint_override=endpoint)
-
-2. use client
-   The client can be created once and used the whole time, up to the max
-   expiration time (48h).
-
-Alternatives
-------------
-
-The 'trust' mechanism of Keystone could also solve this issue, but it is a
-bit more complex than the new one.
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-Each time the client sends a request, some time may be spent applying for a
-new service token from Keystone if the current one has expired.
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-The developers of protection plugins should be aware of these changes.
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* refactor all clients of other openstack services used in the protection
-  service.
-
-Dependencies
-============
-
-It depends on all the patches [3] of Keystone being merged.
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-None
-
-References
-==========
-
-[1] https://bugs.launchpad.net/karbor/+bug/1566793
-[2] https://specs.openstack.org/openstack/keystone-specs/specs/keystone/ocata/allow-expired.html
-[3] https://review.opendev.org/#q,topic:bp/allow-expired,n,z
diff --git a/doc/source/specs/remove_heat.rst b/doc/source/specs/remove_heat.rst
deleted file mode 100644
index 068b7747..00000000
--- a/doc/source/specs/remove_heat.rst
+++ /dev/null
@@ -1,136 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-===========
-Remove Heat
-===========
-
-https://blueprints.launchpad.net/karbor/+spec/remove-heat
-
-Problem description
-===================
-
-As it stands, Karbor uses Heat to restore the resources that were protected
-before.
-Although it works, some disadvantages are obvious. First, Karbor uses only a
-fraction of the functionality Heat supplies, so Heat is too heavy a
-dependency for Karbor. Second, developers of protection plugins prefer
-implementing the restoration of resources in the protection plugin itself
-rather than through Heat. Third, both Karbor and Heat have to be deployed at
-the same time, which adds operational workload. Last, from the implementation
-point of view, the Heat stack runs after all the protection plugins' hooks,
-which breaks the hook definition of 'on_complete'.
-
-
-Use Cases
-=========
-
-* Implement restoration of resources by protection plugins themselves.
-* No longer deploy Heat.
-
-
-Proposed change
-===============
-
-There are two main changes. First, the implementation of restore should be
-refactored for all protection plugins. At present, there are 4 kinds of
-plugins in Karbor, and their new restore methods are described below; a
-sketch of the volume case follows the list.
-
-* volume plugin
-  It will create a new volume from the original volume backup.
-
-* image plugin
-  It will create a new image and upload the original backup data to it.
-
-* network plugin
-  It will create a new network from the original network backup.
-
-* vm plugin
-  It is a bit more complex, because it has several child resources, such as
-  volumes and ips. First, it should create a new vm; second, add the child
-  resources to it, e.g. attach the volumes and set the ip.
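-
-A minimal sketch of what the volume plugin's restore could look like without
-Heat, using python-cinderclient directly; the polling loop and the status
-names are simplified assumptions:
-
-.. code-block:: python
-
-    import time
-
-    def restore_volume(cinder, backup_id):
-        # Restore the backup into a brand new volume; cinder allocates
-        # one when no volume_id is given.
-        restore = cinder.restores.restore(backup_id=backup_id)
-
-        # Poll until the new volume leaves 'restoring-backup'.
-        while True:
-            volume = cinder.volumes.get(restore.volume_id)
-            if volume.status == 'available':
-                return volume.id
-            if volume.status.startswith('error'):
-                raise RuntimeError('restore of backup %s failed' % backup_id)
-            time.sleep(5)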
-
-Another change is updating the deployment scripts, which will no longer
-install Heat.
-
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-There should be no loss of performance, because this actually does the same
-work as Heat.
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-The developers of protection plugins should be aware of these changes.
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* refactor all protection plugins
-* update the deployment scripts
-* update the document on developing protection plugins
-
-Dependencies
-============
-
-None
-
-Testing
-=======
-
-Unit and fullstack tests in Karbor.
-
-
-Documentation Impact
-====================
-
-Documents about how to develop protection plugins should also be updated.
-
-References
-==========
-
-None
diff --git a/doc/source/specs/restore-resource-status.rst b/doc/source/specs/restore-resource-status.rst
deleted file mode 100644
index 8eb0e8ea..00000000
--- a/doc/source/specs/restore-resource-status.rst
+++ /dev/null
@@ -1,96 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-=======================
-Restore Resource Status
-=======================
-
-https://blueprints.launchpad.net/karbor/+spec/restore-resource-status
-
-Protection plugins should be able to set the status of a restoring resource
-during a restore operation. By doing so, users gain visibility into the
-restore process:
-
-- Resources currently being restored
-- Resources restored successfully
-- Resources whose restore has failed, and the reason for the failure
-
-
-Problem description
-===================
-
-Use Cases
----------
-
-- Giving visibility into the restore process
-- Exposing the user to the reason for the failure of a resource restore
-
-Proposed Change
-===============
-
-- Add 'resource_status' and 'resource_reason' dictionaries to the Restore
-  object
-- Add an 'update_resource_status' method to the Restore object
-- Protection plugins should use 'update_resource_status' to set the status
-  of each resource during the restore operation (see the sketch after this
-  list)
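-
-A minimal illustration of how a protection plugin could report per-resource
-status through the proposed method; the exact signature is part of this
-spec's proposal, so treat the call shape below as an assumption:
-
-.. code-block:: python
-
-    def restore(self, context, checkpoint, restore, resource, **kwargs):
-        restore.update_resource_status(resource.type, resource.id,
-                                       'restoring')
-        try:
-            self._do_restore(context, checkpoint, resource)
-        except Exception as e:
-            # 'resource_reason' carries free text explaining the failure.
-            restore.update_resource_status(resource.type, resource.id,
-                                           'error', reason=str(e))
-            raise
-        restore.update_resource_status(resource.type, resource.id,
-                                       'restored')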
-
-Alternatives
-------------
-
-By not adding this, users will have no visibility into resource status
-during and after a restore operation.
-
-Data model impact
------------------
-
-- Add a 'resource_status' dictionary to the Restore object: represents the
-  status of the restoring/restored resource
-- Add a 'resource_reason' dictionary to the Restore object: free text
-  representing the reason for the restore failure of the resource
-
-REST API impact
----------------
-
-- The 'resource_status' dictionary and 'resource_reason' dictionary are
-  added to the Restore object
-
-Security impact
----------------
-
-Validation should be imposed on the status set by plugins, and on the reason
-text.
-
-Other end user impact
----------------------
-
-python-karborclient and karbor-dashboard should consume the new fields of
-the Restore object.
-
-Performance Impact
-------------------
-
-Calling 'update_resource_status' sets values in the database, which should
-have only a slight impact on performance.
-
-
-Other deployer impact
----------------------
-
-Protection plugins should use the new API to set the resource status.
-
-Implementation
-==============
-
-
-Testing
-=======
-
-
-Documentation Impact
-====================
-
-- Add 'resource_status' and 'resource_reason' to the Restore object
-  documentation
-- Add 'update_resource_status' to the Protection Plugin writing
-  documentation
diff --git a/doc/source/specs/s3-bank.rst b/doc/source/specs/s3-bank.rst
deleted file mode 100644
index 0106c86c..00000000
--- a/doc/source/specs/s3-bank.rst
+++ /dev/null
@@ -1,143 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-==========================================
-S3 based Bank implementation
-==========================================
-
-https://blueprints.launchpad.net/karbor/+spec/s3-bank-plugin
-
-Problem description
-===================
-
-Currently we support Swift and File System as bank implementations. We
-should add more bank plugin types so that users have more choices to meet
-their needs in different scenarios.
-
-S3 compatible storage is a valid choice, which is used by many individuals
-and companies in public and private clouds. The S3 based implementation
-will store objects and object metadata on S3 compatible storage.
-
-Use Cases
-=========
-
-As explained, deployers who use or plan to use S3 compatible storage in
-their cloud.
-
-Proposed change
-===============
-
-Objects would be stored under an object name derived from their ID, with `/`
-defined as a separator.
-
-For example::
-
-    Object ID: /checkpoints/2fd14f87-46bd-43a9-8853-9e1a84ebee3d/index.json
-
-The metadata files will be in JSON format. The names and formats of these
-files are the same as the metadata objects in the Swift bank.
-
-For example::
-
-    /checkpoints/3a4d76e7-f8d8-4f2f-9c1d-107d88d7a815/          <- directory
-    /checkpoints/3a4d76e7-f8d8-4f2f-9c1d-107d88d7a815/metadata  <- md file
-    /checkpoints/3a4d76e7-f8d8-4f2f-9c1d-107d88d7a815/status
-
-
-Alternatives
-------------
-
-Do nothing; this is not a mission critical feature.
-
-
-Technical details
------------------
-
-Related docs:
-
-Amazon S3 REST API Introduction
-* http://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html
-
-The python client module that could be used is botocore
-* https://github.com/boto/botocore
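-
-For illustration, storing and reading one of the metadata objects above
-through botocore could look like the following; the endpoint, credentials
-and bucket name are placeholders:
-
-.. code-block:: python
-
-    import botocore.session
-
-    session = botocore.session.get_session()
-    s3 = session.create_client(
-        's3',
-        endpoint_url='http://s3.example.com:7480',  # placeholder endpoint
-        aws_access_key_id='ak',
-        aws_secret_access_key='sk')
-
-    key = 'checkpoints/3a4d76e7-f8d8-4f2f-9c1d-107d88d7a815/status'
-    # create_object(): one bank key maps to one S3 object.
-    s3.put_object(Bucket='karbor', Key=key, Body=b'{"status": "available"}')
-
-    # get_object(): read it back.
-    body = s3.get_object(Bucket='karbor', Key=key)['Body'].read()
-
-    # list_objects(): enumerate a checkpoint by key prefix.
-    resp = s3.list_objects_v2(
-        Bucket='karbor',
-        Prefix='checkpoints/3a4d76e7-f8d8-4f2f-9c1d-107d88d7a815/')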
-
-Data model impact
------------------
-
-None.
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-This bank might be faster or slower than Swift, depending on the use case.
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-Pengju Jiao
-
-Work Items
-----------
-
-* Write Bank Plugin
-* Add documentation
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-New docs to explain how to use and configure the alternative Bank
-implementation.
-
-
-References
-==========
-
-None
diff --git a/doc/source/specs/service-management-api.rst b/doc/source/specs/service-management-api.rst
deleted file mode 100644
index 88e028a8..00000000
--- a/doc/source/specs/service-management-api.rst
+++ /dev/null
@@ -1,166 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-====================================
-Add service management API to Karbor
-====================================
-
-https://blueprints.launchpad.net/karbor/+spec/karbor-service-management
-
-Problem description
-===================
-
-Currently, karbor does not have a service management API for the karbor
-services (karbor-operationengine and karbor-protection). Service management
-APIs exist in almost all the other OpenStack projects, and they make it very
-convenient for admins to list/enable/disable the services on any node.
-
-Use Cases
-=========
-
-Admins want to list/enable/disable karbor services on any karbor node.
-
-Proposed change
-===============
-1. Add a service management API controller for the Karbor API.
-
-   Implement the 'index' method of the service management API controller.
-   Implement the 'update' method of the service management API controller.
-
-2. Add service management to the karbor client.
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-1. List services API
-The response JSON when listing services::
-
-    **get** : /v1/{project_id}/os-services
-    ```json
-    {
-        "services": [
-            {
-                "status": "enabled",
-                "binary": "karbor-operationengine",
-                "disabled_reason": null,
-                "host": "karbor@node",
-                "updated_at": "2017-09-07T13:03:57.000000",
-                "state": "up",
-                "id": 1
-            },
-            {
-                "status": "enabled",
-                "binary": "karbor-protection",
-                "disabled_reason": null,
-                "host": "karbor@node",
-                "updated_at": "2017-09-07T13:03:57.000000",
-                "state": "up",
-                "id": 2
-            }
-        ]
-    }
-
-
-2. Update service API
-The request JSON when updating a service::
-
-    **put** : /v1/{project_id}/os-services/{service_id}
-    ```json
-    {
-        "status": "enable"
-    }
-
-
-The response JSON when updating a service::
-
-    ```json
-    {
-        "service": {
-            "id": "e81d66a4-ddd3-4aba-8a84-171d1cb4d339",
-            "binary": "karbor-protection",
-            "disabled_reason": null,
-            "host": "karbor@node",
-            "state": "up",
-            "status": "enabled",
-            "updated_at": "2012-10-29T13:42:05.000000"
-        }
-    }
-
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-Jiao Pengju
-
-Work Items
-----------
-
-* Add a new RESTful API about service management
-* Add service management to the karbor client
-
-Dependencies
-============
-
-None
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-Documentation Impact
-====================
-
-None
-
-References
-==========
-
-None
diff --git a/doc/source/specs/skeleton.rst b/doc/source/specs/skeleton.rst
deleted file mode 100644
index 7ffc364d..00000000
--- a/doc/source/specs/skeleton.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-==========================================
-Title of your RFE
-==========================================
-
-
-Problem Description
-===================
-
-
-Proposed Change
-===============
-
-
-References
-==========
-
-
diff --git a/doc/source/specs/template.rst b/doc/source/specs/template.rst
deleted file mode 100644
index c9852e67..00000000
--- a/doc/source/specs/template.rst
+++ /dev/null
@@ -1,170 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-====================================
-Example Spec - The title of your RFE
-====================================
-
-Include the URL of your launchpad RFE:
-
-https://bugs.launchpad.net/karbor/+bug/example-id
-
-Introduction paragraph -- why are we doing this feature? A single paragraph
-of prose that **deployers, and developers, and operators** can understand.
-
-Do you even need to file a spec? Most features can be done by filing an RFE
-bug and moving on with life. In most cases, filing an RFE and documenting
-your design is sufficient. If the feature seems very large or contentious,
-then you may want to consider filing a spec.
-
-
-Problem description
-===================
-
-A detailed description of the problem. What problem is this blueprint
-addressing?
-
-Use Cases
----------
-
-What use cases does this address? What impact on actors does this change
-have? Ensure you are clear about the actors in each use case: Developer, End
-User, Deployer etc.
-
-Proposed Change
-===============
-
-How do you propose to solve this problem?
-
-This section is optional, and provides an area to discuss your high-level
-design at the same time as use cases, if desired. Note that by high-level,
-we mean the "view from orbit" rough cut at how things will happen.
-
-This section should 'scope' the effort from a feature standpoint: what is
-the 'Karbor end-to-end system' going to look like after this change?
-What Karbor areas do you intend to touch and how do you intend to work on
-them?
-
-
-Alternatives
-------------
-
-What other ways could we do this thing? Why aren't we using those? This
-doesn't have to be a full literature review, but it should demonstrate that
-thought has been put into why the proposed solution is an appropriate one.
-
-Data model impact
------------------
-
-
-REST API impact
----------------
-
-
-Security impact
----------------
-
-Describe any potential security impact on the system. Some of the items to
-consider include:
-
-Other end user impact
----------------------
-
-Performance Impact
-------------------
-
-Describe any potential performance impact on the system, for example how
-often the new code will be called, and whether there is a major change to
-the calling pattern of existing code.
-
-Examples of things to consider here include:
-
-* A periodic task might look like a small addition, but if it calls
-  conductor or another service, the load is multiplied by the number of
-  nodes in the system.
-
-* Scheduler filters get called once per host for every instance being
-  created, so any latency they introduce is linear with the size of the
-  system.
-
-* A small change in a utility function or a commonly used decorator can
-  have a large impact on performance.
-
-* Calls which result in database queries (whether direct or via conductor)
-  can have a profound impact on performance when called in critical
-  sections of the code.
-
-* Will the change include any locking, and if so what considerations are
-  there on holding the lock?
-
-
-Other deployer impact
----------------------
-
-Discuss things that will affect how you deploy and configure OpenStack
-that have not already been mentioned, such as:
-
-* What config options are being added? Should they be more generic than
-  proposed (for example a flag that other hypervisor drivers might want to
-  implement as well)? Are the default values ones which will work well in
-  real deployments?
-
-* Is this a change that takes immediate effect after it is merged, or is it
-  something that has to be explicitly enabled?
-
-* If this change is a new binary, how would it be deployed?
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Who is leading the writing of the code? Or is this a blueprint where you're
-throwing it out there to see who picks it up?
-
-If more than one person is working on the implementation, please designate
-the primary author and contact.
-
-Primary assignee:
-
-
-Other contributors:
-
-
-Work Items
-----------
-
-Work items or tasks -- break the feature up into the things that need to be
-done to implement it. Those parts might end up being done by different
-people, but we're mostly trying to understand the timeline for
-implementation.
-
-
-Dependencies
-============
-
-* Include specific references to specs and/or blueprints in Karbor, or in
-  other projects, that this one either depends on or is related to.
-
-* If this requires functionality of another project that is not currently
-  used by Karbor (such as the glance v2 API when we previously only
-  required v1), document that fact.
-
-* Does this feature require any new library dependencies or code otherwise
-  not included in OpenStack? Or does it depend on a specific version of a
-  library?
-
-
-Testing
-=======
-
-
-Documentation Impact
-====================
-
-
-References
-==========
-
-Please add any useful references here. You are not required to have any
-reference.
-Moreover, this specification should still make sense when your references
-are unavailable. Examples of what you could include are:
diff --git a/doc/source/specs/trove-database-backup-plugins.rst b/doc/source/specs/trove-database-backup-plugins.rst
deleted file mode 100644
index bd6b1897..00000000
--- a/doc/source/specs/trove-database-backup-plugins.rst
+++ /dev/null
@@ -1,213 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-==========================================================
-Trove database instance protectable and protection plugins
-==========================================================
-
-https://blueprints.launchpad.net/karbor/+spec/trove-database-proection-plugin
-
-Problem description
-===================
-
-Database instances managed by Trove cannot be protected by Karbor now.
-Currently, Trove as a Database service allows users to quickly and easily
-use database features without the burden of handling complex administrative
-tasks.
-
-Users can use the Database service (Trove) to back up a database and store
-the backup artifact in the Object Storage service. Later on, if the original
-database is damaged, users can use the backup artifact to restore the
-database. The restore process creates a database instance.
-
-So the backup feature for database instances can be introduced to karbor by
-writing a protection plugin for database instances.
-
-
-Use Cases
-=========
-
-A user creates a database instance in Trove, and the database instance is
-then used for saving lots of relational and non-relational data. To avoid
-losing these data, the user wants to protect them by making periodic backups
-of this database instance. If the user wants to restore the database
-instance, a new database instance can be created from a backup.
-
-Proposed change
-===============
-
-Trove database instance protectable plugin:
--------------------------------------------
-A new protectable plugin for Trove database instances needs to be
-implemented. The type of the database instance resource is
-"OS::Trove::Instance". It will be added to the constant RESOURCE_TYPES in
-karbor.
-
-
-1. The parent resource types: PROJECT_RESOURCE_TYPE
-
-2. list the resources:
-
-   This plugin interface will call the 'list' method of the Instances
-   manager in troveclient.
-
-3. show the resource:
-
-   This plugin interface will call the 'get' method of the Instances manager
-   in troveclient. The parameter is a database instance id.
-
-4. get dependent resources:
-
-   The parameter parent_resource is a project; this plugin interface will
-   return the database instances in this project.
-
-
-Trove database instance protection plugin
------------------------------------------
-A new protection plugin for Trove database instances needs to be implemented
-(a sketch of the client calls follows the list).
-
-1. Protect Operation:
-
-   The 'create' method of the Backups manager will be called in the main
-   hook of this operation to make a backup of the database instance.
-
-2. Restore Operation:
-
-   The 'create' method of the Instances manager will be called in the main
-   hook of this operation to create a new database instance from the given
-   backup.
-
-3. Delete Operation:
-
-   The database instance backup will be deleted. The 'delete' method of the
-   Backups manager will be called in the main hook of this operation to
-   delete the database instance backup.
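-
-A minimal sketch of those three hooks in terms of python-troveclient calls;
-polling, error handling and the checkpoint bookkeeping are omitted, and the
-exact kwargs are assumptions:
-
-.. code-block:: python
-
-    # 'trove' is an authenticated troveclient v1 client instance.
-
-    def protect(trove, instance_id, backup_name, description):
-        # Backups manager 'create': back the instance up.
-        return trove.backups.create(backup_name, instance_id,
-                                    description=description)
-
-    def restore(trove, backup, restore_name, flavor_id, volume_size):
-        # Instances manager 'create' with a restorePoint rebuilds a new
-        # instance from the backup.
-        return trove.instances.create(
-            restore_name, flavor_id, volume={'size': volume_size},
-            restorePoint={'backupRef': backup.id})
-
-    def delete(trove, backup_id):
-        # Backups manager 'delete': drop the stored backup artifact.
-        trove.backups.delete(backup_id)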
-
-Trove database instance protection plugin schema:
--------------------------------------------------
-
-::
-
-    OPTIONS_SCHEMA = {
-        "title": "Database Instance Protection Options",
-        "type": "object",
-        "properties": {
-            "backup_name": {
-                "type": "string",
-                "title": "Backup Name",
-                "description": "The name of the database instance backup."
-            },
-            "description": {
-                "type": "string",
-                "title": "Description",
-                "description": "The description of the database instance backup."
-            }
-        },
-        "required": ["backup_name", "description"]
-    }
-
-    RESTORE_SCHEMA = {
-        "title": "Database Instance Protection Restore",
-        "type": "object",
-        "properties": {
-            "restore_name": {
-                "type": "string",
-                "title": "Restore Name",
-                "description": "The name of the restored database instance.",
-                "default": None
-            },
-            "restore_description": {
-                "type": "string",
-                "title": "Restore Description",
-                "description": "The description of the restored database instance.",
-                "default": None
-            }
-        }
-    }
-
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-Add the database instance protection plugin endpoint to setup.cfg.
-Add the database instance protection plugin configuration to the provider
-file.
-
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Write database instance backup protectable plugin
-* Write database instance backup protection plugin
-* Write tests
-* Add a usage example about database instance protection
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-Add a usage example about database instance protection.
-
-
-References
-==========
-
-None
diff --git a/doc/source/specs/volume-glance-protection-plugin.rst b/doc/source/specs/volume-glance-protection-plugin.rst
deleted file mode 100644
index 5be5a103..00000000
--- a/doc/source/specs/volume-glance-protection-plugin.rst
+++ /dev/null
@@ -1,186 +0,0 @@
-..
-    This work is licensed under a Creative Commons Attribution 3.0 Unported
-    License.
-
-    http://creativecommons.org/licenses/by/3.0/legalcode
-
-============================================
-Glance based cinder volume protection plugin
-============================================
-
-https://blueprints.launchpad.net/karbor/+spec/backup-volume-data-to-bank
-
-Problem description
-===================
-
-Currently, karbor supports using the cinder backup, cinder snapshot and
-freezer plugins to do cinder volume backup. These plugins all store the
-backup metadata in the bank, but not the volume backup data.
-
-In the cross-site use case, we need to do cinder volume backup and restore
-across different sites (with different cinder/nova/glance service
-endpoints). This requires karbor to save the volume data in an independent
-storage medium (the bank), so that we can back up in one site and restore in
-another site, with the two sites using the same volume backup data in one
-bank.
-
-Obviously, the existing cinder volume protection plugins in karbor cannot
-satisfy the cross-site needs. So we should introduce a new volume protection
-plugin which saves volume data to karbor's bank, like the image protection
-plugin does. Backing up cinder volumes through glance may be a valid choice.
-
-Use Cases
-=========
-
-As explained, users who want to do cross-site backup and restore of cinder
-volumes.
-
-Proposed change
-===============
-
-Volume glance protection plugin
--------------------------------
-
-Add a new volume protection plugin which does the backup and restore of
-cinder volumes through the glance service. Volume data would be stored in
-the bank as chunks, like what the image protection plugin does. A sketch of
-the protect steps follows the lists below.
-
-Steps of protect operation:
-1. Create a temporary snapshot of the volume you want to back up
-2. Create a temporary volume based on the snapshot in step 1
-3. Create a temporary glance image of the temporary volume
-4. Download the temporary image and save it to the karbor bank
-5. Clean up all the temporary resources from steps 1 to 4
-6. Save the backup metadata to the bank
-
-Steps of restore operation:
-1. Create an image from the volume data in the bank
-2. Create a volume from the image created in step 1
-3. Wait for the volume status to become available
-4. Clean up the image created in step 1
-
-Steps in delete operation:
-1. List and delete the objects (volume data and metadata) in the bank.
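-
-To make the protect steps concrete, here is a compact sketch using
-python-cinderclient and python-glanceclient; waiting for the temporary
-resources to become ready is omitted, and the bank key layout is an
-illustrative assumption:
-
-.. code-block:: python
-
-    def protect_volume(cinder, glance, bank, volume_id, checkpoint_id):
-        # Steps 1-2: temporary snapshot, then a temporary volume from it.
-        snap = cinder.volume_snapshots.create(volume_id, force=True)
-        temp = cinder.volumes.create(size=snap.size, snapshot_id=snap.id)
-
-        # Step 3: turn the temporary volume into a temporary glance image.
-        resp, body = cinder.volumes.upload_to_image(
-            temp, force=True, image_name='karbor-temp-%s' % volume_id,
-            container_format='bare', disk_format='raw')
-        image_id = body['os-volume_upload_image']['image_id']
-
-        # Step 4: stream the image into the bank as numbered chunks.
-        chunks = 0
-        for chunk in glance.images.data(image_id):
-            bank.create_object(
-                '/checkpoints/%s/volume-%s/data-%d'
-                % (checkpoint_id, volume_id, chunks), chunk)
-            chunks += 1
-
-        # Step 5: clean up the temporary image, volume and snapshot.
-        glance.images.delete(image_id)
-        cinder.volumes.delete(temp.id)
-        cinder.volume_snapshots.delete(snap.id)
-
-        # Step 6: persist the backup metadata last.
-        bank.create_object(
-            '/checkpoints/%s/volume-%s/metadata'
-            % (checkpoint_id, volume_id),
-            {'volume_id': volume_id, 'chunks': chunks})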
-
-Volume glance protection plugin schema:
----------------------------------------
-
-::
-
-    OPTIONS_SCHEMA = {
-        "title": "Volume Glance Protection Options",
-        "type": "object",
-        "properties": {
-            "backup_name": {
-                "type": "string",
-                "title": "Backup Name",
-                "description": "The name of the backup.",
-                "default": None
-            },
-            "description": {
-                "type": "string",
-                "title": "Description",
-                "description": "The description of the backup."
-            }
-        },
-        "required": ["backup_name"]
    }
-
-    RESTORE_SCHEMA = {
-        "title": "Volume Glance Protection Restore",
-        "type": "object",
-        "properties": {
-            "restore_name": {
-                "type": "string",
-                "title": "Restore Resource Name",
-                "description": "The name of the restored resource.",
-                "default": None
-            },
-        },
-        "required": ["restore_name"]
-    }
-
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-None.
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-This plugin may be slower than the cinder backup and cinder snapshot
-plugins.
-
-Other deployer impact
----------------------
-
-Add the volume by glance protection plugin endpoint to setup.cfg.
-Add the volume by glance protection plugin configuration to the provider
-file.
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-Pengju Jiao
-
-Work Items
-----------
-
-* Write volume by glance protection plugin
-* Write tests
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests in Karbor.
-
-
-Documentation Impact
-====================
-
-Add a usage example about volume by glance protection.
- - -References -========== - -None diff --git a/etc/README-policy.yaml.txt b/etc/README-policy.yaml.txt deleted file mode 100644 index f34c3a37..00000000 --- a/etc/README-policy.yaml.txt +++ /dev/null @@ -1,4 +0,0 @@ -To generate the sample policy.yaml file, run the following command from the top -level of the karbor directory: - - tox -egenpolicy diff --git a/etc/apache2/apache-karbor-api.conf b/etc/apache2/apache-karbor-api.conf deleted file mode 100644 index 69ae3b89..00000000 --- a/etc/apache2/apache-karbor-api.conf +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is an example Apache2 configuration file for using -# karbor API through mod_wsgi -Listen 8799 - - - WSGIDaemonProcess osapi_karbor user=stack group=stack processes=2 threads=2 display-name=%{GROUP} - WSGIProcessGroup osapi_karbor - WSGIScriptAlias / /usr/local/bin/karbor-wsgi - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - - SetEnv APACHE_RUN_USER stack - SetEnv APACHE_RUN_GROUP stack - - ErrorLogFormat "%M" - - ErrorLog /var/log/apache2/karbor_api.log - CustomLog /var/log/apache2/karbor_api_access.log combined - - - Require all granted - - - - diff --git a/etc/api-paste.ini b/etc/api-paste.ini deleted file mode 100644 index d5d54068..00000000 --- a/etc/api-paste.ini +++ /dev/null @@ -1,37 +0,0 @@ -############# -# OpenStack # -############# - -[composite:osapi_karbor] -use = egg:Paste#urlmap -/: apiversions -/v1: openstack_karbor_api_v1 - -[composite:openstack_karbor_api_v1] -use = call:karbor.api.middleware.auth:pipeline_factory -noauth = request_id faultwrap noauth apiv1 -keystone = request_id faultwrap authtoken keystonecontext apiv1 - -[filter:request_id] -paste.filter_factory = oslo_middleware:RequestId.factory - -[filter:faultwrap] -paste.filter_factory = karbor.api.middleware.fault:FaultWrapper.factory - -[filter:catch_errors] -paste.filter_factory = oslo_middleware:CatchErrors.factory - -[filter:noauth] -paste.filter_factory = karbor.api.middleware.auth:NoAuthMiddleware.factory - -[filter:keystonecontext] -paste.filter_factory = karbor.api.middleware.auth:KarborKeystoneContext.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory - -[app:apiversions] -paste.app_factory = karbor.api.versions:Versions.factory - -[app:apiv1] -paste.app_factory = karbor.api.v1.router:APIRouter.factory diff --git a/etc/karbor-policy-generator.conf b/etc/karbor-policy-generator.conf deleted file mode 100644 index 821dcc37..00000000 --- a/etc/karbor-policy-generator.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -output_file = etc/policy.yaml.sample -namespace = karbor diff --git a/etc/oslo-config-generator/karbor.conf b/etc/oslo-config-generator/karbor.conf deleted file mode 100644 index 48ac238e..00000000 --- a/etc/oslo-config-generator/karbor.conf +++ /dev/null @@ -1,9 +0,0 @@ -[DEFAULT] -output_file = etc/karbor.conf.sample -summarize = true -namespace = karbor.common.opts -namespace = keystonemiddleware.auth_token -namespace = oslo.concurrency 
-namespace = oslo.db -namespace = oslo.log -namespace = oslo.policy diff --git a/etc/providers.d/noop.conf b/etc/providers.d/noop.conf deleted file mode 100644 index 79a88cb6..00000000 --- a/etc/providers.d/noop.conf +++ /dev/null @@ -1,20 +0,0 @@ -[provider] -name = No-Op Provider -description = This provider does nothing for each protect and restore operation. Used for testing -id = b766f37c-d011-4026-8228-28730d734a3f - -plugin=karbor-noop-protection-plugin -bank=karbor-swift-bank-plugin - -enabled=True - -[swift_client] -swift_auth_url=http://127.0.0.1/identity -swift_user=demo -swift_key=password -swift_tenant_name=demo - -[swift_bank_plugin] -lease_expire_window=120 -lease_renew_window=100 -lease_validity_window=100 diff --git a/etc/providers.d/openstack-infra.conf b/etc/providers.d/openstack-infra.conf deleted file mode 100644 index fa2db0fd..00000000 --- a/etc/providers.d/openstack-infra.conf +++ /dev/null @@ -1,25 +0,0 @@ -[provider] -name = OS Infra Provider -description = This provider uses OpenStack's own services (swift, cinder) as storage -id = cf56bd3e-97a7-4078-b6d5-f36246333fd9 - -plugin=karbor-volume-protection-plugin -plugin=karbor-image-protection-plugin -plugin=karbor-server-protection-plugin -plugin=karbor-share-protection-plugin -plugin=karbor-network-protection-plugin -plugin=karbor-database-protection-plugin -bank=karbor-swift-bank-plugin - -enabled=True - -[swift_client] -swift_auth_url=http://127.0.0.1/identity -swift_user=demo -swift_key=password -swift_tenant_name=demo - -[swift_bank_plugin] -lease_expire_window=120 -lease_renew_window=100 -lease_validity_window=100 diff --git a/etc/providers.d/openstack-kubernetes.conf b/etc/providers.d/openstack-kubernetes.conf deleted file mode 100644 index a7b09248..00000000 --- a/etc/providers.d/openstack-kubernetes.conf +++ /dev/null @@ -1,21 +0,0 @@ -[provider] -name = OS Kubernetes Provider -description = This provider is about running the kubernetes cluster on OpenStack with OpenStack cloud provider. -id = e3982e71-f44d-4b09-8abd-3e53e4b80d10 - -plugin=karbor-volume-protection-plugin -plugin=karbor-pod-protection-plugin -bank=karbor-swift-bank-plugin - -enabled=True - -[swift_client] -swift_auth_url=http://127.0.0.1/identity -swift_user=demo -swift_key=password -swift_tenant_name=demo - -[swift_bank_plugin] -lease_expire_window=120 -lease_renew_window=100 -lease_validity_window=100 diff --git a/karbor/__init__.py b/karbor/__init__.py deleted file mode 100644 index 9a9d65a0..00000000 --- a/karbor/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
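-
-# The version string below is resolved by pbr from the installed
-# package metadata (or from git metadata in a development checkout).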
-
-import pbr.version
-
-
-__version__ = pbr.version.VersionInfo(
-    'karbor').version_string()
diff --git a/karbor/api/__init__.py b/karbor/api/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/karbor/api/common.py b/karbor/api/common.py
deleted file mode 100644
index 5f4ed6a2..00000000
--- a/karbor/api/common.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import os
-import re
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from six.moves import urllib
-import webob
-
-from karbor import db
-from karbor.i18n import _
-from karbor import utils
-
-
-api_common_opts = [
-    cfg.IntOpt('osapi_max_limit',
-               default=1000,
-               help='The maximum number of items that a collection '
-                    'resource returns in a single response'),
-    cfg.StrOpt('osapi_karbor_base_URL',
-               help='Base URL that will be presented to users in links '
-                    'to the OpenStack Karbor API'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(api_common_opts)
-
-LOG = logging.getLogger(__name__)
-
-
-def get_pagination_params(params, max_limit=None):
-    """Return marker, limit, offset tuple from request.
-
-    :param params: `wsgi.Request`'s GET dictionary, possibly containing
-                   'marker', 'limit', and 'offset' variables. 'marker' is
-                   the id of the last element the client has seen, 'limit'
-                   is the maximum number of items to return and 'offset' is
-                   the number of items to skip from the marker or from the
-                   first element. If 'limit' is not specified, or > max_limit,
-                   we default to max_limit. Negative values for either offset
-                   or limit will cause exc.HTTPBadRequest() exceptions to be
-                   raised. If no offset is present we'll default to 0 and if
-                   no marker is present we'll default to None.
-    :param max_limit: Max value the 'limit' return value can take
-    :returns: Tuple (marker, limit, offset)
-    """
-    max_limit = max_limit or CONF.osapi_max_limit
-    limit = _get_limit_param(params, max_limit)
-    marker = _get_marker_param(params)
-    offset = _get_offset_param(params)
-    return marker, limit, offset
-
-
-def _get_limit_param(params, max_limit=None):
-    """Extract integer limit from request's dictionary or fail.
-
-    Defaults to max_limit if 'limit' is not present, and returns max_limit
-    if the provided 'limit' is greater than max_limit.
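-
-    For example, with max_limit 1000: {'limit': '50'} yields 50,
-    {'limit': '2000'} yields 1000, and {'limit': '0'} or {'limit': 'abc'}
-    raises HTTPBadRequest.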
- """ - max_limit = max_limit or CONF.osapi_max_limit - try: - limit = int(params.pop('limit', max_limit)) - except ValueError: - msg = _('limit param must be an integer') - raise webob.exc.HTTPBadRequest(explanation=msg) - if limit <= 0: - msg = _('limit param must be positive') - raise webob.exc.HTTPBadRequest(explanation=msg) - limit = min(limit, max_limit) - return limit - - -def _get_marker_param(params): - """Extract marker id from request's dictionary (defaults to None).""" - return params.pop('marker', None) - - -def _get_offset_param(params): - """Extract offset id from request's dictionary (defaults to 0) or fail.""" - offset = params.pop('offset', 0) - return utils.validate_integer(offset, 'offset', 0, db.MAX_INT) - - -def limited(items, request, max_limit=None): - """Return a slice of items according to requested offset and limit. - - :param items: A sliceable entity - :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' - GET variables. 'offset' is where to start in the list, - and 'limit' is the maximum number of items to return. If - 'limit' is not specified, 0, or > max_limit, we default - to max_limit. Negative values for either offset or limit - will cause exc.HTTPBadRequest() exceptions to be raised. - :kwarg max_limit: The maximum number of items to return from 'items' - """ - max_limit = max_limit or CONF.osapi_max_limit - marker, limit, offset = get_pagination_params(request.GET.copy(), - max_limit) - range_end = offset + (limit or max_limit) - return items[offset:range_end] - - -def limited_by_marker(items, request, max_limit=None): - """Return a slice of items according to the requested marker and limit.""" - max_limit = max_limit or CONF.osapi_max_limit - marker, limit, __ = get_pagination_params(request.GET.copy(), max_limit) - - start_index = 0 - if marker: - start_index = -1 - for i, item in enumerate(items): - if 'flavorid' in item: - if item['flavorid'] == marker: - start_index = i + 1 - break - elif item['id'] == marker or item.get('uuid') == marker: - start_index = i + 1 - break - if start_index < 0: - msg = _('marker [%s] not found') % marker - raise webob.exc.HTTPBadRequest(explanation=msg) - range_end = start_index + limit - return items[start_index:range_end] - - -def get_sort_params(params, default_key='created_at', default_dir='desc'): - """Retrieves sort keys/directions parameters. - - Processes the parameters to create a list of sort keys and sort directions - that correspond to either the 'sort' parameter or the 'sort_key' and - 'sort_dir' parameter values. The value of the 'sort' parameter is a comma- - separated list of sort keys, each key is optionally appended with - ':'. - - Note that the 'sort_key' and 'sort_dir' parameters are deprecated in kilo - and an exception is raised if they are supplied with the 'sort' parameter. - - The sort parameters are removed from the request parameters by this - function. 
- - :param params: webob.multidict of request parameters (from - karbor.api.openstack.wsgi.Request.params) - :param default_key: default sort key value, added to the list if no - sort keys are supplied - :param default_dir: default sort dir value, added to the list if the - corresponding key does not have a direction - specified - :returns: list of sort keys, list of sort dirs - :raise webob.exc.HTTPBadRequest: If both 'sort' and either 'sort_key' or - 'sort_dir' are supplied parameters - """ - if 'sort' in params and ('sort_key' in params or 'sort_dir' in params): - msg = _("The 'sort_key' and 'sort_dir' parameters are deprecated and " - "cannot be used with the 'sort' parameter.") - raise webob.exc.HTTPBadRequest(explanation=msg) - sort_keys = [] - sort_dirs = [] - if 'sort' in params: - for sort in params.pop('sort').strip().split(','): - sort_key, _sep, sort_dir = sort.partition(':') - if not sort_dir: - sort_dir = default_dir - sort_keys.append(sort_key.strip()) - sort_dirs.append(sort_dir.strip()) - else: - sort_key = params.pop('sort_key', default_key) - sort_dir = params.pop('sort_dir', default_dir) - sort_keys.append(sort_key.strip()) - sort_dirs.append(sort_dir.strip()) - return sort_keys, sort_dirs - - -def get_request_url(request): - url = request.application_url - headers = request.headers - forwarded = headers.get('X-Forwarded-Host') - if forwarded: - url_parts = list(urllib.parse.urlsplit(url)) - url_parts[1] = re.split(r',\s?', forwarded)[-1] - url = urllib.parse.urlunsplit(url_parts).rstrip('/') - return url - - -def remove_version_from_href(href): - """Removes the first api version from the href. - - Given: 'http://www.karbor.com/v1.1/123' - Returns: 'http://www.karbor.com/123' - - Given: 'http://www.karbor.com/v1.1' - Returns: 'http://www.karbor.com' - - """ - parsed_url = urllib.parse.urlsplit(href) - url_parts = parsed_url.path.split('/', 2) - - # NOTE: this should match vX.X or vX - expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') - if expression.match(url_parts[1]): - del url_parts[1] - - new_path = '/'.join(url_parts) - - if new_path == parsed_url.path: - msg = 'href %s does not contain version' % href - LOG.debug(msg) - raise ValueError(msg) - - parsed_url = list(parsed_url) - parsed_url[2] = new_path - return urllib.parse.urlunsplit(parsed_url) - - -class ViewBuilder(object): - """Model API responses as dictionaries.""" - - _collection_name = None - - def _get_links(self, request, identifier): - return [{"rel": "self", - "href": self._get_href_link(request, identifier), }, - {"rel": "bookmark", - "href": self._get_bookmark_link(request, identifier), }] - - def _get_next_link(self, request, identifier, collection_name): - """Return href string with proper limit and marker params.""" - params = request.params.copy() - params["marker"] = identifier - prefix = self._update_link_prefix(get_request_url(request), - CONF.osapi_karbor_base_URL) - url = os.path.join(prefix, - request.environ["karbor.context"].project_id, - collection_name) - return "%s?%s" % (url, urllib.parse.urlencode(params)) - - def _get_href_link(self, request, identifier): - """Return an href string pointing to this object.""" - prefix = self._update_link_prefix(get_request_url(request), - CONF.osapi_karbor_base_URL) - return os.path.join(prefix, - request.environ["karbor.context"].project_id, - self._collection_name, - str(identifier)) - - def _get_bookmark_link(self, request, identifier): - """Create a URL that refers to a specific resource.""" - base_url = 
remove_version_from_href(get_request_url(request)) - base_url = self._update_link_prefix(base_url, - CONF.osapi_karbor_base_URL) - return os.path.join(base_url, - request.environ["karbor.context"].project_id, - self._collection_name, - str(identifier)) - - def _get_collection_links(self, request, items, collection_name, - item_count=None, id_key="uuid"): - """Retrieve 'next' link, if applicable. - - The next link is included if we are returning as many items as we can, - given the restrictions of limit optional request parameter and - osapi_max_limit configuration parameter as long as we are returning - some elements. - - So we return next link if: - - 1) 'limit' param is specified and equal to the number of items. - 2) 'limit' param is NOT specified and the number of items is - equal to CONF.osapi_max_limit. - - :param request: API request - :param items: List of collection items - :param collection_name: Name of collection, used to generate the - next link for a pagination query - :param item_count: Length of the list of the original collection - items - :param id_key: Attribute key used to retrieve the unique ID, used - to generate the next link marker for a pagination query - :returns: links - """ - item_count = item_count or len(items) - limit = _get_limit_param(request.GET.copy()) - if len(items) and limit <= item_count: - return self._generate_next_link(items, id_key, request, - collection_name) - - return [] - - def _generate_next_link(self, items, id_key, request, - collection_name): - links = [] - last_item = items[-1] - if id_key in last_item: - last_item_id = last_item[id_key] - else: - last_item_id = last_item["id"] - links.append({ - "rel": "next", - "href": self._get_next_link(request, last_item_id, - collection_name), - }) - return links - - def _update_link_prefix(self, orig_url, prefix): - if not prefix: - return orig_url - url_parts = list(urllib.parse.urlsplit(orig_url)) - prefix_parts = list(urllib.parse.urlsplit(prefix)) - url_parts[0:2] = prefix_parts[0:2] - url_parts[2] = prefix_parts[2] + url_parts[2] - - return urllib.parse.urlunsplit(url_parts).rstrip('/') diff --git a/karbor/api/middleware/__init__.py b/karbor/api/middleware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/api/middleware/auth.py b/karbor/api/middleware/auth.py deleted file mode 100644 index 8075f54d..00000000 --- a/karbor/api/middleware/auth.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Common Auth Middleware. 
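-
-Provides the paste pipeline factory that selects between the 'noauth'
-and 'keystone' pipelines, KarborKeystoneContext (which builds a request
-context from keystone auth headers), and NoAuthMiddleware (which fakes
-a token for development and testing).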
- -""" - - -import os - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_middleware import request_id -from oslo_serialization import jsonutils -import webob.dec -import webob.exc - -from karbor.api.openstack import wsgi -from karbor import context -from karbor.i18n import _ -from karbor.wsgi import common as base_wsgi - - -use_forwarded_for_opt = cfg.BoolOpt( - 'use_forwarded_for', - default=False, - help='Treat X-Forwarded-For as the canonical remote address. ' - 'Only enable this if you have a sanitizing proxy.') - -CONF = cfg.CONF -CONF.register_opt(use_forwarded_for_opt) - -LOG = logging.getLogger(__name__) - - -def pipeline_factory(loader, global_conf, **local_conf): - """A paste pipeline replica that keys off of auth_strategy.""" - pipeline = local_conf[CONF.auth_strategy] - pipeline = pipeline.split() - filters = [loader.get_filter(n) for n in pipeline[:-1]] - app = loader.get_app(pipeline[-1]) - filters.reverse() - for filter in filters: - app = filter(app) - return app - - -class InjectContext(base_wsgi.Middleware): - """Add a 'karbor.context' to WSGI environment.""" - - def __init__(self, context, *args, **kwargs): - self.context = context - super(InjectContext, self).__init__(*args, **kwargs) - - @webob.dec.wsgify(RequestClass=base_wsgi.Request) - def __call__(self, req): - req.environ['karbor.context'] = self.context - return self.application - - -class KarborKeystoneContext(base_wsgi.Middleware): - """Make a request context from keystone headers.""" - - @webob.dec.wsgify(RequestClass=base_wsgi.Request) - def __call__(self, req): - headers = req.headers - environ = req.environ - - user_id = headers.get('X_USER_ID') or headers.get('X_USER') - if user_id is None: - LOG.debug("Neither X_USER_ID nor X_USER found in request") - return webob.exc.HTTPUnauthorized() - # get the roles - roles = [r.strip() for r in headers.get('X_ROLE', '').split(',')] - if 'X_TENANT_ID' in headers: - # This is the new header since Keystone went to ID/Name - project_id = headers['X_TENANT_ID'] - else: - # This is for legacy compatibility - project_id = headers['X_TENANT'] - - project_name = headers.get('X_TENANT_NAME') - - req_id = environ.get(request_id.ENV_REQUEST_ID) - - # Get the auth token - auth_token = headers.get('X_AUTH_TOKEN', - headers.get('X_STORAGE_TOKEN')) - - # Build a context, including the auth_token... 
- remote_address = req.remote_addr - - auth_token_info = environ.get('keystone.token_info') - - service_catalog = None - if headers.get('X_SERVICE_CATALOG') is not None: - try: - catalog_header = headers.get('X_SERVICE_CATALOG') - service_catalog = jsonutils.loads(catalog_header) - except ValueError: - raise webob.exc.HTTPInternalServerError( - explanation=_('Invalid service catalog json.')) - - if CONF.use_forwarded_for: - remote_address = headers.get('X-Forwarded-For', remote_address) - ctx = context.RequestContext(user_id, - project_id, - project_name=project_name, - roles=roles, - auth_token=auth_token, - remote_address=remote_address, - service_catalog=service_catalog, - request_id=req_id, - auth_token_info=auth_token_info) - - environ['karbor.context'] = ctx - return self.application - - -class NoAuthMiddleware(base_wsgi.Middleware): - """Return a fake token if one isn't specified.""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - if 'X-Auth-Token' not in req.headers: - user_id = req.headers.get('X-Auth-User', 'admin') - project_id = req.headers.get('X-Auth-Project-Id', 'admin') - os_url = os.path.join(req.url, project_id) - res = webob.Response() - # NOTE(vish): This is expecting and returning Auth(1.1), whereas - # keystone uses 2.0 auth. We should probably allow - # 2.0 auth here as well. - res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) - res.headers['X-Server-Management-Url'] = os_url - res.content_type = 'text/plain' - res.status = '204' - return res - - token = req.headers['X-Auth-Token'] - user_id, _sep, project_id = token.partition(':') - project_id = project_id or user_id - remote_address = getattr(req, 'remote_address', '127.0.0.1') - if CONF.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', remote_address) - ctx = context.RequestContext(user_id, - project_id, - is_admin=True, - remote_address=remote_address) - - req.environ['karbor.context'] = ctx - return self.application diff --git a/karbor/api/middleware/fault.py b/karbor/api/middleware/fault.py deleted file mode 100644 index 37fc0530..00000000 --- a/karbor/api/middleware/fault.py +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
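-
-"""Middleware that converts unhandled exceptions into API fault responses."""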
- -from oslo_log import log as logging -import six -from six.moves import http_client -import webob.dec -import webob.exc - -from karbor.api.openstack import wsgi -from karbor import exception -from karbor import utils -from karbor.wsgi import common as base_wsgi - - -LOG = logging.getLogger(__name__) - - -class FaultWrapper(base_wsgi.Middleware): - """Calls down the middleware stack, making exceptions into faults.""" - - _status_to_type = {} - - @staticmethod - def status_to_type(status): - if not FaultWrapper._status_to_type: - for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError): - FaultWrapper._status_to_type[clazz.code] = clazz - return FaultWrapper._status_to_type.get( - status, webob.exc.HTTPInternalServerError)() - - def _error(self, inner, req): - LOG.error('Middleware error occurred: %(type)s %(error)s', - {'type': type(inner), - 'error': inner}) - safe = getattr(inner, 'safe', False) - headers = getattr(inner, 'headers', None) - status = getattr(inner, 'code', http_client.INTERNAL_SERVER_ERROR) - if status is None: - status = http_client.INTERNAL_SERVER_ERROR - - msg_dict = dict(url=req.url, status=status) - LOG.info("%(url)s returned with HTTP %(status)d", msg_dict) - outer = self.status_to_type(status) - if headers: - outer.headers = headers - - if safe: - msg = (inner.msg if isinstance(inner, exception.KarborException) - else six.text_type(inner)) - outer.explanation = msg - return wsgi.Fault(outer) - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - try: - return req.get_response(self.application) - except Exception as ex: - return self._error(ex, req) diff --git a/karbor/api/openstack/__init__.py b/karbor/api/openstack/__init__.py deleted file mode 100644 index 60cb2bf0..00000000 --- a/karbor/api/openstack/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -WSGI middleware for OpenStack API controllers. -""" - -import routes - - -class APIMapper(routes.Mapper): - def routematch(self, url=None, environ=None): - if url == "": - result = self._match("", environ) - return result[0], result[1] - return routes.Mapper.routematch(self, url, environ) - - def connect(self, *args, **kwargs): - # NOTE(inhye): Default the format part of a route to only accept json - # and xml so it doesn't eat all characters after a '.' - # in the url. 
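-        # For example, '/plans/my.backup' keeps the id 'my.backup'
-        # intact instead of treating '.backup' as a response format.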
- kwargs.setdefault('requirements', {}) - if not kwargs['requirements'].get('format'): - kwargs['requirements']['format'] = 'json|xml' - return routes.Mapper.connect(self, *args, **kwargs) - - -class ProjectMapper(APIMapper): - def resource(self, member_name, collection_name, **kwargs): - if 'parent_resource' not in kwargs: - kwargs['path_prefix'] = '{project_id}/' - else: - parent_resource = kwargs['parent_resource'] - p_collection = parent_resource['collection_name'] - p_member = parent_resource['member_name'] - kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection, - p_member) - routes.Mapper.resource(self, - member_name, - collection_name, - **kwargs) diff --git a/karbor/api/openstack/wsgi.py b/karbor/api/openstack/wsgi.py deleted file mode 100644 index 17552229..00000000 --- a/karbor/api/openstack/wsgi.py +++ /dev/null @@ -1,985 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect -import math -import time - -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import excutils -from oslo_utils import strutils -import six -from six.moves import http_client -import webob - -from karbor import exception -from karbor import i18n -from karbor.i18n import _ -from karbor.wsgi import common as wsgi - - -LOG = logging.getLogger(__name__) - -SUPPORTED_CONTENT_TYPES = ( - 'application/json', -) - -_MEDIA_TYPE_MAP = { - 'application/json': 'json', -} - - -class Request(wsgi.Request): - - def best_match_content_type(self): - """Determine the most acceptable content-type. - - Based on: - 1) URI extension (.json) - 2) Content-type header - 3) Accept* headers - """ - # First lookup http request path - parts = self.path.rsplit('.', 1) - if len(parts) > 1: - _format = parts[1] - if _format in ['json']: - return 'application/{0}'.format(_format) - - # Then look up content header - type_from_header = self.get_content_type() - if type_from_header: - return type_from_header - ctypes = ['application/json'] - - # Finally search in Accept-* headers - bm = self.accept.best_match(ctypes) - return bm or 'application/json' - - def get_content_type(self): - allowed_types = ("application/json",) - if "Content-Type" not in self.headers: - LOG.debug("Missing Content-Type") - return None - _type = self.content_type - if _type in allowed_types: - return _type - return None - - def best_match_language(self): - """Determines best available locale from the Accept-Language header. - - :returns: the best language match or None if the 'Accept-Language' - header was not available in the request. 
- """ - if not self.accept_language: - return None - all_languages = i18n.get_available_languages() - return self.accept_language.best_match(all_languages) - - -class ActionDispatcher(object): - """Maps method name to local methods through action name.""" - - def dispatch(self, *args, **kwargs): - """Find and call local method.""" - action = kwargs.pop('action', 'default') - action_method = getattr(self, str(action), self.default) - return action_method(*args, **kwargs) - - def default(self, data): - raise NotImplementedError() - - -class TextDeserializer(ActionDispatcher): - """Default request body deserialization.""" - - def deserialize(self, datastring, action='default'): - return self.dispatch(datastring, action=action) - - def default(self, datastring): - return {} - - -class JSONDeserializer(TextDeserializer): - - def _from_json(self, datastring): - try: - return jsonutils.loads(datastring) - except ValueError: - msg = _("cannot understand JSON") - raise exception.MalformedRequestBody(reason=msg) - - def default(self, datastring): - return {'body': self._from_json(datastring)} - - -class DictSerializer(ActionDispatcher): - """Default request body serialization.""" - - def serialize(self, data, action='default'): - return self.dispatch(data, action=action) - - def default(self, data): - return "" - - -class JSONDictSerializer(DictSerializer): - """Default JSON request body serialization.""" - - def default(self, data): - return jsonutils.dumps(data) - - -def serializers(**serializers): - """Attaches serializers to a method. - - This decorator associates a dictionary of serializers with a - method. Note that the function attributes are directly - manipulated; the method is not wrapped. - """ - - def decorator(func): - if not hasattr(func, 'wsgi_serializers'): - func.wsgi_serializers = {} - func.wsgi_serializers.update(serializers) - return func - return decorator - - -def deserializers(**deserializers): - """Attaches deserializers to a method. - - This decorator associates a dictionary of deserializers with a - method. Note that the function attributes are directly - manipulated; the method is not wrapped. - """ - - def decorator(func): - if not hasattr(func, 'wsgi_deserializers'): - func.wsgi_deserializers = {} - func.wsgi_deserializers.update(deserializers) - return func - return decorator - - -def response(code): - """Attaches response code to a method. - - This decorator associates a response code with a method. Note - that the function attributes are directly manipulated; the method - is not wrapped. - """ - - def decorator(func): - func.wsgi_code = code - return func - return decorator - - -class ResponseObject(object): - """Bundles a response object with appropriate serializers. - - Object that app methods may return in order to bind alternate - serializers with a response object to be serialized. Its use is - optional. - """ - - def __init__(self, obj, code=None, **serializers): - """Binds serializers with an object. - - Takes keyword arguments akin to the @serializer() decorator - for specifying serializers. Serializers specified will be - given preference over default serializers or method-specific - serializers on return. 
- """ - - self.obj = obj - self.serializers = serializers - self._default_code = http_client.OK - self._code = code - self._headers = {} - self.serializer = None - self.media_type = None - - def __getitem__(self, key): - """Retrieves a header with the given name.""" - - return self._headers[key.lower()] - - def __setitem__(self, key, value): - """Sets a header with the given name to the given value.""" - - self._headers[key.lower()] = value - - def __delitem__(self, key): - """Deletes the header with the given name.""" - - del self._headers[key.lower()] - - def _bind_method_serializers(self, meth_serializers): - """Binds method serializers with the response object. - - Binds the method serializers with the response object. - Serializers specified to the constructor will take precedence - over serializers specified to this method. - - :param meth_serializers: A dictionary with keys mapping to - response types and values containing - serializer objects. - """ - - # We can't use update because that would be the wrong - # precedence - for mtype, serializer in meth_serializers.items(): - self.serializers.setdefault(mtype, serializer) - - def get_serializer(self, content_type, default_serializers=None): - """Returns the serializer for the wrapped object. - - Returns the serializer for the wrapped object subject to the - indicated content type. If no serializer matching the content - type is attached, an appropriate serializer drawn from the - default serializers will be used. If no appropriate - serializer is available, raises InvalidContentType. - """ - - default_serializers = default_serializers or {} - - try: - mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) - if mtype in self.serializers: - return mtype, self.serializers[mtype] - else: - return mtype, default_serializers[mtype] - except (KeyError, TypeError): - raise exception.InvalidContentType(content_type=content_type) - - def preserialize(self, content_type, default_serializers=None): - """Prepares the serializer that will be used to serialize. - - Determines the serializer that will be used and prepares an - instance of it for later call. This allows the serializer to - be accessed by extensions for, e.g., template extension. - """ - - mtype, serializer = self.get_serializer(content_type, - default_serializers) - self.media_type = mtype - self.serializer = serializer() - - def attach(self, **kwargs): - """Attach slave templates to serializers.""" - - if self.media_type in kwargs: - self.serializer.attach(kwargs[self.media_type]) - - def serialize(self, request, content_type, default_serializers=None): - """Serializes the wrapped object. - - Utility method for serializing the wrapped object. Returns a - webob.Response object. 
- """ - - if self.serializer: - serializer = self.serializer - else: - _mtype, _serializer = self.get_serializer(content_type, - default_serializers) - serializer = _serializer() - - response = webob.Response() - response.status_int = self.code - for hdr, value in self._headers.items(): - response.headers[hdr] = value - response.headers['Content-Type'] = content_type - if self.obj is not None: - body = serializer.serialize(self.obj) - if isinstance(body, six.text_type): - body = body.encode('utf-8') - response.body = body - - return response - - @property - def code(self): - """Retrieve the response status.""" - - return self._code or self._default_code - - @property - def headers(self): - """Retrieve the headers.""" - - return self._headers.copy() - - -def action_peek_json(body): - """Determine action to invoke.""" - - try: - decoded = jsonutils.loads(body) - except ValueError: - msg = _("cannot understand JSON") - raise exception.MalformedRequestBody(reason=msg) - - # Make sure there's exactly one key... - if len(decoded) != 1: - msg = _("too many body keys") - raise exception.MalformedRequestBody(reason=msg) - - # Return the action and the decoded body... - return list(decoded.keys())[0] - - -class ResourceExceptionHandler(object): - """Context manager to handle Resource exceptions. - - Used when processing exceptions generated by API implementation - methods (or their extensions). Converts most exceptions to Fault - exceptions, with the appropriate logging. - """ - - def __enter__(self): - return None - - def __exit__(self, ex_type, ex_value, ex_traceback): - if not ex_value: - return True - - if isinstance(ex_value, exception.NotAuthorized): - raise Fault(webob.exc.HTTPForbidden(explanation=ex_value.msg)) - elif isinstance(ex_value, exception.Invalid): - raise Fault(exception.ConvertedException( - code=ex_value.code, explanation=ex_value.msg)) - elif isinstance(ex_value, TypeError): - exc_info = (ex_type, ex_value, ex_traceback) - LOG.error('Exception handling resource: %s', - ex_value, exc_info=exc_info) - raise Fault(webob.exc.HTTPBadRequest()) - elif isinstance(ex_value, Fault): - LOG.info("Fault thrown: %s", ex_value) - raise ex_value - elif isinstance(ex_value, webob.exc.HTTPException): - LOG.info("HTTP exception thrown: %s", ex_value) - raise Fault(ex_value) - - # We didn't handle the exception - return False - - -class Resource(wsgi.Application): - """WSGI app that handles (de)serialization and controller dispatch. - - WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method upon its controller. All - controller action methods must accept a 'req' argument, which is the - incoming wsgi.Request. If the operation is a PUT or POST, the controller - method must also accept a 'body' argument (the deserialized request body). - They may raise a webob.exc exception or return a dict, which will be - serialized by requested content type. - - Exceptions derived from webob.exc.HTTPException will be automatically - wrapped in Fault() to provide API friendly error responses. - """ - - def __init__(self, controller, action_peek=None, **deserializers): - """Initialize Resource. 
- - :param controller: object that implement methods created by routes lib - :param action_peek: dictionary of routines for peeking into an action - request body to determine the desired action - """ - - self.controller = controller - - default_deserializers = dict(json=JSONDeserializer) - default_deserializers.update(deserializers) - - self.default_deserializers = default_deserializers - self.default_serializers = dict(json=JSONDictSerializer) - - self.action_peek = dict(json=action_peek_json) - self.action_peek.update(action_peek or {}) - - # Copy over the actions dictionary - self.wsgi_actions = {} - if controller: - self.register_actions(controller) - - # Save a mapping of extensions - self.wsgi_extensions = {} - self.wsgi_action_extensions = {} - - def register_actions(self, controller): - """Registers controller actions with this resource.""" - - actions = getattr(controller, 'wsgi_actions', {}) - for key, method_name in actions.items(): - self.wsgi_actions[key] = getattr(controller, method_name) - - def register_extensions(self, controller): - """Registers controller extensions with this resource.""" - - extensions = getattr(controller, 'wsgi_extensions', []) - for method_name, action_name in extensions: - # Look up the extending method - extension = getattr(controller, method_name) - - if action_name: - # Extending an action... - if action_name not in self.wsgi_action_extensions: - self.wsgi_action_extensions[action_name] = [] - self.wsgi_action_extensions[action_name].append(extension) - else: - # Extending a regular method - if method_name not in self.wsgi_extensions: - self.wsgi_extensions[method_name] = [] - self.wsgi_extensions[method_name].append(extension) - - def get_action_args(self, request_environment): - """Parse dictionary created by routes library.""" - - # NOTE(Vek): Check for get_action_args() override in the - # controller - if hasattr(self.controller, 'get_action_args'): - return self.controller.get_action_args(request_environment) - - try: - args = request_environment['wsgiorg.routing_args'][1].copy() - except (KeyError, IndexError, AttributeError): - return {} - - try: - del args['controller'] - except KeyError: - pass - - try: - del args['format'] - except KeyError: - pass - - return args - - def get_body(self, request): - - if len(request.body) == 0: - LOG.debug("Empty body provided in request") - return None, '' - - try: - content_type = request.get_content_type() - except exception.InvalidContentType: - LOG.debug("Unrecognized Content-Type provided in request") - return None, '' - - if not content_type: - LOG.debug("No Content-Type provided in request") - return None, '' - - return content_type, request.body - - def deserialize(self, meth, content_type, body): - meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) - try: - mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) - if mtype in meth_deserializers: - deserializer = meth_deserializers[mtype] - else: - deserializer = self.default_deserializers[mtype] - except (KeyError, TypeError): - raise exception.InvalidContentType(content_type=content_type) - - return deserializer().deserialize(body) - - def pre_process_extensions(self, extensions, request, action_args): - # List of callables for post-processing extensions - post = [] - - for ext in extensions: - if inspect.isgeneratorfunction(ext): - response = None - - # If it's a generator function, the part before the - # yield is the preprocessing stage - try: - with ResourceExceptionHandler(): - gen = ext(req=request, **action_args) - response = 
next(gen) - except Fault as ex: - response = ex - - # We had a response... - if response: - return response, [] - - # No response, queue up generator for post-processing - post.append(gen) - else: - # Regular functions only perform post-processing - post.append(ext) - - # Run post-processing in the reverse order - return None, reversed(post) - - def post_process_extensions(self, extensions, resp_obj, request, - action_args): - for ext in extensions: - response = None - if inspect.isgenerator(ext): - # If it's a generator, run the second half of - # processing - try: - with ResourceExceptionHandler(): - response = ext.send(resp_obj) - except StopIteration: - # Normal exit of generator - continue - except Fault as ex: - response = ex - else: - # Regular functions get post-processing... - try: - with ResourceExceptionHandler(): - response = ext(req=request, resp_obj=resp_obj, - **action_args) - except Fault as ex: - response = ex - - # We had a response... - if response: - return response - - return None - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, request): - """WSGI method that controls (de)serialization and method dispatch.""" - - LOG.info("%(method)s %(url)s", - {"method": request.method, "url": request.url}) - - # Identify the action, its arguments, and the requested - # content type - action_args = self.get_action_args(request.environ) - action = action_args.pop('action', None) - content_type, body = self.get_body(request) - accept = request.best_match_content_type() - - # NOTE(Vek): Splitting the function up this way allows for - # auditing by external tools that wrap the existing - # function. If we try to audit __call__(), we can - # run into troubles due to the @webob.dec.wsgify() - # decorator. - return self._process_stack(request, action, action_args, - content_type, body, accept) - - def _process_stack(self, request, action, action_args, - content_type, body, accept): - """Implement the processing stack.""" - - # Get the implementing method - try: - meth, extensions = self.get_method(request, action, - content_type, body) - except (AttributeError, TypeError): - return Fault(webob.exc.HTTPNotFound()) - except KeyError as ex: - msg = _("There is no such action: %s") % ex.args[0] - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - except exception.MalformedRequestBody: - msg = _("Malformed request body") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - - # Now, deserialize the request body... 
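-        # Unsupported content types and malformed bodies are turned into
-        # 400 responses below instead of propagating to the caller.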
- try: - if content_type: - contents = self.deserialize(meth, content_type, body) - else: - contents = {} - except exception.InvalidContentType: - msg = _("Unsupported Content-Type") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - except exception.MalformedRequestBody: - msg = _("Malformed request body") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - - # Update the action args - action_args.update(contents) - - project_id = action_args.pop("project_id", None) - context = request.environ.get('karbor.context') - if (context and project_id and (project_id != context.project_id)): - msg = _("Malformed request url") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - - # Run pre-processing extensions - response, post = self.pre_process_extensions(extensions, - request, action_args) - - if not response: - try: - with ResourceExceptionHandler(): - action_result = self.dispatch(meth, request, action_args) - except Fault as ex: - response = ex - - if not response: - # No exceptions; convert action_result into a - # ResponseObject - resp_obj = None - if type(action_result) is dict or action_result is None: - resp_obj = ResponseObject(action_result) - elif isinstance(action_result, ResponseObject): - resp_obj = action_result - else: - response = action_result - - # Run post-processing extensions - if resp_obj: - _set_request_id_header(request, resp_obj) - # Do a preserialize to set up the response object - serializers = getattr(meth, 'wsgi_serializers', {}) - resp_obj._bind_method_serializers(serializers) - if hasattr(meth, 'wsgi_code'): - resp_obj._default_code = meth.wsgi_code - resp_obj.preserialize(accept, self.default_serializers) - - # Process post-processing extensions - response = self.post_process_extensions(post, resp_obj, - request, action_args) - - if resp_obj and not response: - response = resp_obj.serialize(request, accept, - self.default_serializers) - - try: - msg_dict = dict(url=request.url, status=response.status_int) - msg = "%(url)s returned with HTTP %(status)d" - except AttributeError as e: - msg_dict = dict(url=request.url, e=e) - msg = "%(url)s returned a fault: %(e)s" - - LOG.info(msg, msg_dict) - - return response - - def get_method(self, request, action, content_type, body): - """Look up the action-specific method and its extensions.""" - - # Look up the method - try: - if not self.controller: - meth = getattr(self, action) - else: - meth = getattr(self.controller, action) - except AttributeError as e: - with excutils.save_and_reraise_exception(e) as ctxt: - if (not self.wsgi_actions or action not in ['action', - 'create', - 'delete', - 'update']): - LOG.exception('Get method error.') - else: - ctxt.reraise = False - else: - return meth, self.wsgi_extensions.get(action, []) - - if action == 'action': - # OK, it's an action; figure out which action... - mtype = _MEDIA_TYPE_MAP.get(content_type) - action_name = self.action_peek[mtype](body) - LOG.debug("Action body: %s", body) - else: - action_name = action - - # Look up the action method - return (self.wsgi_actions[action_name], - self.wsgi_action_extensions.get(action_name, [])) - - def dispatch(self, method, request, action_args): - """Dispatch a call to the action-specific method.""" - - return method(req=request, **action_args) - - -def action(name): - """Mark a function as an action. - - The given name will be taken as the action key in the body. - - This is also overloaded to allow extensions to provide - non-extending definitions of create and delete operations. 
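-
-    For example::
-
-        @action('os-resetState')
-        def _reset_state(self, req, id, body):
-            ...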
- """ - - def decorator(func): - func.wsgi_action = name - return func - return decorator - - -def extends(*args, **kwargs): - """Indicate a function extends an operation. - - Can be used as either:: - - @extends - def index(...): - pass - - or as:: - - @extends(action='resize') - def _action_resize(...): - pass - """ - - def decorator(func): - # Store enough information to find what we're extending - func.wsgi_extends = (func.__name__, kwargs.get('action')) - return func - - # If we have positional arguments, call the decorator - if args: - return decorator(*args) - - # OK, return the decorator instead - return decorator - - -class ControllerMetaclass(type): - """Controller metaclass. - - This metaclass automates the task of assembling a dictionary - mapping action keys to method names. - """ - - def __new__(mcs, name, bases, cls_dict): - """Adds the wsgi_actions dictionary to the class.""" - - # Find all actions - actions = {} - extensions = [] - # start with wsgi actions from base classes - for base in bases: - actions.update(getattr(base, 'wsgi_actions', {})) - for key, value in cls_dict.items(): - if not callable(value): - continue - if getattr(value, 'wsgi_action', None): - actions[value.wsgi_action] = key - elif getattr(value, 'wsgi_extends', None): - extensions.append(value.wsgi_extends) - - # Add the actions and extensions to the class dict - cls_dict['wsgi_actions'] = actions - cls_dict['wsgi_extensions'] = extensions - - return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, - cls_dict) - - -@six.add_metaclass(ControllerMetaclass) -class Controller(object): - """Default controller.""" - - _view_builder_class = None - - def __init__(self, view_builder=None): - """Initialize controller with a view builder instance.""" - if view_builder: - self._view_builder = view_builder - elif self._view_builder_class: - self._view_builder = self._view_builder_class() - else: - self._view_builder = None - - @staticmethod - def is_valid_body(body, entity_name): - if not (body and entity_name in body): - return False - - def is_dict(d): - try: - d.get(None) - return True - except AttributeError: - return False - - if not is_dict(body[entity_name]): - return False - - return True - - @staticmethod - def assert_valid_body(body, entity_name): - # NOTE: After v1 api is deprecated need to merge 'is_valid_body' and - # 'assert_valid_body' in to one method. Right now it is not - # possible to modify 'is_valid_body' to raise exception because - # in case of V1 api when 'is_valid_body' return False, - # 'HTTPUnprocessableEntity' exception is getting raised and in - # V2 api 'HTTPBadRequest' exception is getting raised. 
- if not Controller.is_valid_body(body, entity_name): - raise webob.exc.HTTPBadRequest( - explanation=_("Missing required element '%s' in " - "request body.") % entity_name) - - @staticmethod - def validate_name_and_description(body): - name = body.get('name') - if name is not None: - if isinstance(name, six.string_types): - body['name'] = name.strip() - try: - strutils.check_string_length(body['name'], 'Name', - min_length=0, max_length=255) - except exception.InvalidInput as error: - raise webob.exc.HTTPBadRequest(explanation=error.msg) - - description = body.get('description') - if description is not None: - try: - strutils.check_string_length(description, 'Description', - min_length=0, max_length=255) - except exception.InvalidInput as error: - raise webob.exc.HTTPBadRequest(explanation=error.msg) - - @staticmethod - def validate_string_length(value, entity_name, min_length=0, - max_length=None, remove_whitespaces=False): - """Check the length of specified string. - - :param value: the value of the string - :param entity_name: the name of the string - :param min_length: the min_length of the string - :param max_length: the max_length of the string - :param remove_whitespaces: True if trimming whitespaces is needed - else False - """ - if isinstance(value, six.string_types) and remove_whitespaces: - value = value.strip() - try: - strutils.check_string_length(value, entity_name, - min_length=min_length, - max_length=max_length) - except exception.InvalidInput as error: - raise webob.exc.HTTPBadRequest(explanation=error.msg) - - -class Fault(webob.exc.HTTPException): - """Wrap webob.exc.HTTPException to provide API friendly response.""" - - _fault_names = {http_client.BAD_REQUEST: "badRequest", - http_client.UNAUTHORIZED: "unauthorized", - http_client.FORBIDDEN: "forbidden", - http_client.NOT_FOUND: "itemNotFound", - http_client.METHOD_NOT_ALLOWED: "badMethod", - http_client.CONFLICT: "conflictingRequest", - http_client.REQUEST_ENTITY_TOO_LARGE: "overLimit", - http_client.UNSUPPORTED_MEDIA_TYPE: "badMediaType", - http_client.NOT_IMPLEMENTED: "notImplemented", - http_client.SERVICE_UNAVAILABLE: "serviceUnavailable"} - - def __init__(self, exception): - """Create a Fault for the given webob.exc.exception.""" - self.wrapped_exc = exception - self.status_int = exception.status_int - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - """Generate a WSGI response based on the exception passed to ctor.""" - # Replace the body with fault details. 
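-        # For example, a 404 is serialized as
-        # {"itemNotFound": {"code": 404, "message": "..."}}.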
- locale = req.best_match_language() - code = self.wrapped_exc.status_int - fault_name = self._fault_names.get(code, "computeFault") - explanation = self.wrapped_exc.explanation - fault_data = { - fault_name: { - 'code': code, - 'message': i18n.translate(explanation, locale)}} - if code == http_client.REQUEST_ENTITY_TOO_LARGE: - retry = self.wrapped_exc.headers.get('Retry-After', None) - if retry: - fault_data[fault_name]['retryAfter'] = retry - - # 'code' is an attribute on the fault tag itself - - content_type = req.best_match_content_type() - serializer = { - 'application/json': JSONDictSerializer(), - }[content_type] - - body = serializer.serialize(fault_data) - if isinstance(body, six.text_type): - body = body.encode('utf-8') - self.wrapped_exc.body = body - self.wrapped_exc.content_type = content_type - _set_request_id_header(req, self.wrapped_exc.headers) - - return self.wrapped_exc - - def __str__(self): - return self.wrapped_exc.__str__() - - -def _set_request_id_header(req, headers): - context = req.environ.get('karbor.context') - if context: - headers['x-compute-request-id'] = context.request_id - - -class OverLimitFault(webob.exc.HTTPException): - """Rate-limited request response.""" - - def __init__(self, message, details, retry_time): - """Initialize new `OverLimitFault` with relevant information.""" - hdrs = OverLimitFault._retry_after(retry_time) - self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) - self.content = { - "overLimitFault": { - "code": self.wrapped_exc.status_int, - "message": message, - "details": details, - }, - } - - @staticmethod - def _retry_after(retry_time): - delay = int(math.ceil(retry_time - time.time())) - retry_after = delay if delay > 0 else 0 - headers = {'Retry-After': '%d' % retry_after} - return headers - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, request): - """Serializes the wrapped exception conforming to our error format.""" - content_type = request.best_match_content_type() - - def translate(msg): - locale = request.best_match_language() - return i18n.translate(msg, locale) - - self.content['overLimitFault']['message'] = translate( - self.content['overLimitFault']['message']) - self.content['overLimitFault']['details'] = translate( - self.content['overLimitFault']['details']) - - serializer = { - 'application/json': JSONDictSerializer(), - }[content_type] - - content = serializer.serialize(self.content) - self.wrapped_exc.body = content - - return self.wrapped_exc diff --git a/karbor/api/schemas/__init__.py b/karbor/api/schemas/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/api/schemas/checkpoints.py b/karbor/api/schemas/checkpoints.py deleted file mode 100644 index 813413f1..00000000 --- a/karbor/api/schemas/checkpoints.py +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Schema for Karbor V1 Checkpoints API. 
- -""" - -from karbor.api.validation import parameter_types - - -create = { - 'type': 'object', - 'properties': { - 'type': 'object', - 'checkpoint': { - 'type': 'object', - 'properties': { - 'plan_id': parameter_types.uuid, - 'extra-info': parameter_types.metadata, - }, - 'required': ['plan_id'], - 'additionalProperties': False, - }, - }, - 'required': ['checkpoint'], - 'additionalProperties': False, -} - -update = { - 'type': 'object', - 'properties': { - 'type': 'object', - 'os-resetState': { - 'type': 'object', - 'properties': { - 'state': { - 'type': 'string', - 'enum': ['available', 'error'], - }, - }, - 'required': ['state'], - 'additionalProperties': False, - }, - }, - 'required': ['os-resetState'], - 'additionalProperties': False, -} diff --git a/karbor/api/schemas/copies.py b/karbor/api/schemas/copies.py deleted file mode 100644 index 6ce8dbc4..00000000 --- a/karbor/api/schemas/copies.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Schema for Karbor V1 Copies API. - -""" - -from karbor.api.validation import parameter_types - - -create = { - 'type': 'object', - 'properties': { - 'type': 'object', - 'copy': { - 'type': 'object', - 'properties': { - 'plan_id': parameter_types.uuid, - 'parameters': parameter_types.parameters, - }, - 'required': ['plan_id'], - 'additionalProperties': False, - }, - }, - 'required': ['copy'], - 'additionalProperties': False, -} diff --git a/karbor/api/schemas/plans.py b/karbor/api/schemas/plans.py deleted file mode 100644 index 65f685d2..00000000 --- a/karbor/api/schemas/plans.py +++ /dev/null @@ -1,60 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Schema for Karbor V1 Plans API. 
- -""" - -from karbor.api.validation import parameter_types - - -create = { - 'type': 'object', - 'properties': { - 'type': 'object', - 'plan': { - 'type': 'object', - 'properties': { - 'name': parameter_types.name, - 'description': parameter_types.description, - 'provider_id': parameter_types.uuid, - 'parameters': parameter_types.parameters, - 'resources': parameter_types.resources, - }, - 'required': ['provider_id', 'parameters'], - 'additionalProperties': False, - }, - }, - 'required': ['plan'], - 'additionalProperties': False, -} - -update = { - 'type': 'object', - 'properties': { - 'type': 'object', - 'plan': { - 'type': 'object', - 'properties': { - 'name': parameter_types.name, - 'status': {'type': ['string', 'null']}, - 'resources': parameter_types.resources, - 'description': parameter_types.description, - }, - 'required': [], - 'additionalProperties': False, - }, - }, - 'required': ['plan'], - 'additionalProperties': False, -} diff --git a/karbor/api/schemas/quota_classes.py b/karbor/api/schemas/quota_classes.py deleted file mode 100644 index 59d2944a..00000000 --- a/karbor/api/schemas/quota_classes.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Schema for Karbor V1 Quota class API. - -""" - -from karbor.api.validation import parameter_types - -update = { - 'type': 'object', - 'properties': { - 'quota_class': parameter_types.metadata, - }, - 'required': ['quota_class'], - 'additionalProperties': False, -} diff --git a/karbor/api/schemas/quotas.py b/karbor/api/schemas/quotas.py deleted file mode 100644 index 70a908b0..00000000 --- a/karbor/api/schemas/quotas.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Schema for Karbor V1 Quotas API. - -""" - -from karbor.api.validation import parameter_types - - -update = { - 'type': 'object', - 'properties': { - 'quota': parameter_types.metadata, - }, - 'required': ['quota'], - 'additionalProperties': False, -} diff --git a/karbor/api/schemas/restores.py b/karbor/api/schemas/restores.py deleted file mode 100644 index 9840e67b..00000000 --- a/karbor/api/schemas/restores.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Schema for Karbor V1 Restores API. - -""" - -from karbor.api.validation import parameter_types - - -create = { - 'type': 'object', - 'properties': { - 'type': 'object', - 'restore': { - 'type': 'object', - 'properties': { - 'provider_id': parameter_types.uuid, - 'checkpoint_id': parameter_types.uuid, - 'restore_target': {'type': ['string', 'null']}, - 'restore_auth': parameter_types.metadata, - 'parameters': parameter_types.parameters, - }, - 'required': ['provider_id', 'checkpoint_id', 'parameters'], - 'additionalProperties': False, - }, - }, - 'required': ['restore'], - 'additionalProperties': False, -} diff --git a/karbor/api/schemas/scheduled_operations.py b/karbor/api/schemas/scheduled_operations.py deleted file mode 100644 index 597cd8a6..00000000 --- a/karbor/api/schemas/scheduled_operations.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Schema for Karbor V1 scheduled operations API. - -""" - -from karbor.api.validation import parameter_types - - -create = { - 'type': 'object', - 'properties': { - 'type': 'object', - 'scheduled_operation': { - 'type': 'object', - 'properties': { - 'name': parameter_types.name, - 'description': parameter_types.description, - 'operation_type': {'type': 'string'}, - 'trigger_id': parameter_types.uuid, - 'operation_definition': { - 'type': 'object', - 'properties': { - 'provider_id': parameter_types.uuid, - 'plan_id': parameter_types.uuid, - }, - 'required': ['provider_id', 'plan_id'], - 'additionalProperties': True, - }, - - }, - 'required': ['operation_type', 'trigger_id', - 'operation_definition'], - 'additionalProperties': False, - }, - }, - 'required': ['scheduled_operation'], - 'additionalProperties': False, -} diff --git a/karbor/api/schemas/triggers.py b/karbor/api/schemas/triggers.py deleted file mode 100644 index 4a3c2fc8..00000000 --- a/karbor/api/schemas/triggers.py +++ /dev/null @@ -1,78 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Schema for Karbor V1 Triggers API. 
- -""" - - -create = { - 'type': 'object', - 'properties': { - 'type': 'object', - 'trigger_info': { - 'type': 'object', - 'properties': { - 'name': {'type': 'string'}, - 'type': {'type': 'string'}, - 'properties': { - 'type': 'object', - 'properties': { - 'format': {'type': 'string'}, - 'pattern': {'type': 'string'}, - 'start_time': {'type': 'string'}, - 'end_time': {'type': 'string'}, - 'window': {'type': 'integer'}, - }, - 'required': ['format', 'pattern'], - 'additionalProperties': False, - }, - }, - 'required': ['name', 'type', 'properties'], - 'additionalProperties': False, - }, - }, - 'required': ['trigger_info'], - 'additionalProperties': False, -} - - -update = { - 'type': 'object', - 'properties': { - 'type': 'object', - 'trigger_info': { - 'type': 'object', - 'properties': { - 'name': {'type': 'string'}, - 'type': {'type': 'string'}, - 'properties': { - 'type': 'object', - 'properties': { - 'format': {'type': 'string'}, - 'pattern': {'type': 'string'}, - 'start_time': {'type': 'string'}, - 'end_time': {'type': 'string'}, - 'window': {'type': 'integer'}, - }, - 'required': [], - 'additionalProperties': False, - }, - }, - 'required': [], - 'additionalProperties': False, - }, - }, - 'required': ['trigger_info'], - 'additionalProperties': False, -} diff --git a/karbor/api/schemas/verifications.py b/karbor/api/schemas/verifications.py deleted file mode 100644 index 17f6b029..00000000 --- a/karbor/api/schemas/verifications.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Schema for Karbor V1 Verifications API. - -""" - -from karbor.api.validation import parameter_types - - -create = { - 'type': 'object', - 'properties': { - 'type': 'object', - 'verification': { - 'type': 'object', - 'properties': { - 'provider_id': parameter_types.uuid, - 'checkpoint_id': parameter_types.uuid, - 'parameters': parameter_types.parameters, - }, - 'required': ['provider_id', 'checkpoint_id'], - 'additionalProperties': False, - }, - }, - 'required': ['verification'], - 'additionalProperties': False, -} diff --git a/karbor/api/v1/__init__.py b/karbor/api/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/api/v1/copies.py b/karbor/api/v1/copies.py deleted file mode 100644 index 4e62da1f..00000000 --- a/karbor/api/v1/copies.py +++ /dev/null @@ -1,119 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The copy api.""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import uuidutils - -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor.api.schemas import copies as copy_schema -from karbor.api import validation -from karbor import exception -from karbor.i18n import _ - -from karbor import objects -from karbor.policies import copies as copy_policy -from karbor.services.protection import api as protection_api - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -class CopiesViewBuilder(common.ViewBuilder): - """Model a server API response as a python dictionary.""" - - def detail(self, request, copy): - """Detailed view of a single copy.""" - copy_ref = { - 'copy': { - 'project_id': copy.get('project_id'), - 'provider_id': copy.get('provider_id'), - 'plan_id': copy.get('plan_id'), - 'checkpoint_id': copy.get('checkpoint_id'), - 'parameters': copy.get('parameters'), - } - } - return copy_ref - - -class CopiesController(wsgi.Controller): - """The copy API controller for the OpenStack API.""" - - _view_builder_class = CopiesViewBuilder - - def __init__(self): - self.protection_api = protection_api.API() - super(CopiesController, self).__init__() - - @validation.schema(copy_schema.create) - def create(self, req, provider_id, body): - """Creates a new copy.""" - - LOG.debug('Create copy request body: %s', body) - context = req.environ['karbor.context'] - context.can(copy_policy.CREATE_POLICY) - copy = body['copy'] - plan_id = copy.get("plan_id", None) - - if not uuidutils.is_uuid_like(provider_id): - msg = _("Invalid provider id provided.") - raise exception.InvalidInput(reason=msg) - - parameters = copy.get("parameters", None) - - try: - plan = objects.Plan.get_by_id(context, plan_id) - except exception.PlanNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - if provider_id != plan.provider_id: - msg = _("The provider id is not the same as the value " - "in the plan.") - raise exception.InvalidInput(reason=msg) - - filters = {'plan_id': plan_id} - checkpoints = self.protection_api.list_checkpoints( - context, provider_id, marker=None, limit=None, - sort_keys=None, sort_dirs=None, filters=filters, offset=None, - all_tenants=False) - - if not checkpoints: - msg = _("The plan has not been protected.") - raise exception.InvalidInput(reason=msg) - - plan.parameters.update(parameters) - try: - checkpoint_copy = self.protection_api.copy(context, plan) - except Exception: - LOG.exception("Failed to create checkpoint copies.") - raise - - copy = { - 'project_id': context.project_id, - 'provider_id': plan.provider_id, - 'plan_id': plan.id, - 'checkpoint_id': checkpoint_copy, - 'parameters': parameters - } - - retval = self._view_builder.detail(req, copy) - return retval - - -def create_resource(): - return wsgi.Resource(CopiesController()) diff --git a/karbor/api/v1/operation_logs.py b/karbor/api/v1/operation_logs.py deleted file mode 100644 index 670fb5b2..00000000 --- a/karbor/api/v1/operation_logs.py +++ /dev/null @@ -1,214 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The operation_logs api.""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import uuidutils - -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor import exception -from karbor.i18n import _ - -from karbor import objects -from karbor.policies import operation_logs as operation_log_policy -from karbor.services.operationengine import api as operationengine_api -from karbor.services.protection import api as protection_api -from karbor import utils - -import six - -query_operation_log_filters_opt = cfg.ListOpt( - 'query_operation_log_filters', - default=['checkpoint_id', 'plan_id', 'restore_id', 'status'], - help="Operation log filter options which " - "non-admin user could use to " - "query operation_logs. Default values " - "are: ['checkpoint_id', 'plan_id', 'restore_id', 'status']") - -CONF = cfg.CONF -CONF.register_opt(query_operation_log_filters_opt) - -LOG = logging.getLogger(__name__) - - -class OperationLogViewBuilder(common.ViewBuilder): - """Model a server API response as a python dictionary.""" - - _collection_name = "operation_logs" - - def detail(self, request, operation_log): - """Detailed view of a single operation_log.""" - - operation_log_ref = { - 'operation_log': { - 'id': operation_log.get('id'), - 'operation_type': operation_log.get('operation_type'), - 'checkpoint_id': operation_log.get('checkpoint_id'), - 'plan_id': operation_log.get('plan_id'), - 'provider_id': operation_log.get('provider_id'), - 'restore_id': operation_log.get('restore_id'), - 'scheduled_operation_id': operation_log.get( - 'scheduled_operation_id'), - 'status': operation_log.get('status'), - 'started_at': operation_log.get('started_at'), - 'ended_at': operation_log.get('ended_at'), - 'error_info': operation_log.get('error_info'), - 'extra_info': operation_log.get('extra_info'), - } - } - return operation_log_ref - - def detail_list(self, request, operation_logs, - operation_log_count=None): - """Detailed view of a list of operation_logs.""" - return self._list_view(self.detail, request, operation_logs, - operation_log_count, - self._collection_name) - - def _list_view(self, func, request, operation_logs, - operation_log_count, - coll_name=_collection_name): - """Provide a view for a list of operation_logs. 
- - """ - operation_logs_list = [func( - request, operation_log)['operation_log'] - for operation_log in operation_logs] - operation_logs_links = self._get_collection_links( - request, operation_logs, coll_name, operation_log_count) - operation_logs_dict = {} - operation_logs_dict['operation_logs'] = operation_logs_list - if operation_logs_links: - operation_logs_dict['operation_logs_links'] = ( - operation_logs_links) - - return operation_logs_dict - - -class OperationLogsController(wsgi.Controller): - """The operation_log API controller for the OpenStack API.""" - - _view_builder_class = OperationLogViewBuilder - - def __init__(self): - self.operationengine_api = operationengine_api.API() - self.protection_api = protection_api.API() - super(OperationLogsController, self).__init__() - - def show(self, req, id): - """Return data about the given OperationLogs.""" - context = req.environ['karbor.context'] - - LOG.info("Show operation log with id: %s", id, context=context) - - if not uuidutils.is_uuid_like(id): - msg = _("Invalid operation log id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - try: - operation_log = self._operation_log_get(context, id) - except exception.OperationLogFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - LOG.info("Show operation log request issued successfully.") - return self._view_builder.detail(req, operation_log) - - def index(self, req): - """Returns a list of operation_logs. - - """ - context = req.environ['karbor.context'] - - LOG.info("Show operation log list", context=context) - - params = req.params.copy() - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = params - - utils.remove_invalid_filter_options( - context, - filters, - self._get_operation_log_filter_options()) - - utils.check_filters(filters) - operation_logs = self._get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - - retval_operation_logs = self._view_builder.detail_list( - req, operation_logs) - - LOG.info("Show operation_log list request issued " - "successfully.") - - return retval_operation_logs - - def _get_all(self, context, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, offset=None): - context.can(operation_log_policy.GET_ALL_POLICY) - - if filters is None: - filters = {} - - all_tenants = utils.get_bool_param('all_tenants', filters) - - if filters: - LOG.debug("Searching by: %s.", six.text_type(filters)) - - if context.is_admin and all_tenants: - # Need to remove all_tenants to pass the filtering below. 
- del filters['all_tenants'] - operation_logs = objects.OperationLogList.get_all( - context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - else: - operation_logs = objects.OperationLogList.get_all_by_project( - context, context.project_id, marker, limit, - sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, - offset=offset) - - LOG.info("Get all operation_logs completed successfully.") - return operation_logs - - def _get_operation_log_filter_options(self): - """Return operation_log search options allowed by non-admin.""" - return CONF.query_operation_log_filters - - def _operation_log_get(self, context, operation_log_id): - if not uuidutils.is_uuid_like(operation_log_id): - msg = _("Invalid operation_log id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - operation_log = objects.OperationLog.get_by_id( - context, operation_log_id) - try: - context.can(operation_log_policy.GET_POLICY, operation_log) - except exception.PolicyNotAuthorized: - raise exception.OperationLogFound( - operation_log_id=operation_log_id) - LOG.info("Operation log info retrieved successfully.") - return operation_log - - -def create_resource(): - return wsgi.Resource(OperationLogsController()) diff --git a/karbor/api/v1/plans.py b/karbor/api/v1/plans.py deleted file mode 100644 index f4004614..00000000 --- a/karbor/api/v1/plans.py +++ /dev/null @@ -1,436 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The plans api.""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import excutils -from oslo_utils import uuidutils - -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor.api.schemas import plans as plan_schema -from karbor.api import validation -from karbor.common import constants -from karbor.common import notification -from karbor.common.notification import StartNotification -from karbor import exception -from karbor.i18n import _ - -from karbor import objects -from karbor.objects import base as objects_base -from karbor.policies import plans as plan_policy -from karbor import quota -from karbor.services.operationengine import api as operationengine_api -from karbor.services.protection import api as protection_api -from karbor import utils - -import six - -query_plan_filters_opt = cfg.ListOpt('query_plan_filters', - default=['name', 'status', - 'description'], - help="Plan filter options which " - "non-admin user could use to " - "query plans. 
Default values " - "are: ['name', 'status', " - "'description']") -CONF = cfg.CONF -CONF.register_opt(query_plan_filters_opt) -QUOTAS = quota.QUOTAS - -LOG = logging.getLogger(__name__) - - -class PlanViewBuilder(common.ViewBuilder): - """Model a server API response as a python dictionary.""" - - _collection_name = "plans" - - def detail(self, request, plan): - """Detailed view of a single plan.""" - - resources = plan.get('resources') - resources_list = [] - for resource in resources: - resource_dict = {} - resource_dict['id'] = resource.pop('id') - resource_dict['name'] = resource.pop('name') - resource_dict['type'] = resource.pop('type') - extra_info = resource.pop('extra_info', None) - if extra_info: - resource_dict['extra_info'] = jsonutils.loads( - extra_info) - resources_list.append(resource_dict) - plan_ref = { - 'plan': { - 'id': plan.get('id'), - 'name': plan.get('name'), - 'description': plan.get('description'), - 'resources': resources_list, - 'provider_id': plan.get('provider_id'), - 'status': plan.get('status'), - 'parameters': plan.get('parameters'), - } - } - return plan_ref - - def detail_list(self, request, plans, plan_count=None): - """Detailed view of a list of plans.""" - return self._list_view(self.detail, request, plans, - plan_count, - self._collection_name) - - def _list_view(self, func, request, plans, plan_count, - coll_name=_collection_name): - """Provide a view for a list of plans. - - :param func: Function used to format the plan data - :param request: API request - :param plans: List of plans in dictionary format - :param plan_count: Length of the original list of plans - :param coll_name: Name of collection, used to generate the next link - for a pagination query - :returns: Plan data in dictionary format - """ - plans_list = [func(request, plan)['plan'] for plan in plans] - plans_links = self._get_collection_links(request, - plans, - coll_name, - plan_count) - plans_dict = {} - plans_dict['plans'] = plans_list - if plans_links: - plans_dict['plans_links'] = plans_links - - return plans_dict - - -class PlansController(wsgi.Controller): - """The Plans API controller for the OpenStack API.""" - - _view_builder_class = PlanViewBuilder - - def __init__(self): - self.operationengine_api = operationengine_api.API() - self.protection_api = protection_api.API() - super(PlansController, self).__init__() - - def show(self, req, id): - """Return data about the given plan.""" - context = req.environ['karbor.context'] - - LOG.info("Show plan with id: %s", id, context=context) - - try: - plan = self._plan_get(context, id) - except exception.PlanNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - LOG.info("Show plan request issued successfully.", - resource={'id': plan.id}) - return self._view_builder.detail(req, plan) - - def delete(self, req, id): - """Delete a plan.""" - context = req.environ['karbor.context'] - - LOG.info("Delete plan with id: %s", id, context=context) - context.notification = notification.KarborPlanDelete( - context, request=req) - try: - plan = self._plan_get(context, id) - except exception.PlanNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - context.can(plan_policy.DELETE_POLICY, target_obj=plan) - project_id = plan.project_id - - try: - with StartNotification(context, id=id): - plan.destroy() - except Exception: - msg = _("Failed to destroy a plan.") - raise exc.HTTPServerError(reason=msg) - - try: - reserve_opts = {'plans': -1} - reservations = QUOTAS.reserve(context, - project_id=project_id, - 
**reserve_opts) - except Exception: - LOG.exception("Failed to update usages deleting plan.") - else: - QUOTAS.commit(context, reservations, - project_id=project_id) - LOG.info("Delete plan request issued successfully.", - resource={'id': plan.id}) - - def index(self, req): - """Returns a list of plans, transformed through view builder.""" - context = req.environ['karbor.context'] - - LOG.info("Show plan list", context=context) - - params = req.params.copy() - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = params - - utils.remove_invalid_filter_options(context, - filters, - self._get_plan_filter_options()) - - utils.check_filters(filters) - plans = self._get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - - retval_plans = self._view_builder.detail_list(req, plans) - - LOG.info("Show plan list request issued successfully.") - - return retval_plans - - def _get_all(self, context, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, offset=None): - context.can(plan_policy.GET_ALL_POLICY) - - if filters is None: - filters = {} - - all_tenants = utils.get_bool_param('all_tenants', filters) - - if filters: - LOG.debug("Searching by: %s.", six.text_type(filters)) - - if context.is_admin and all_tenants: - # Need to remove all_tenants to pass the filtering below. - del filters['all_tenants'] - plans = objects.PlanList.get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - else: - plans = objects.PlanList.get_all_by_project( - context, context.project_id, marker, limit, - sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, - offset=offset) - - LOG.info("Get all plans completed successfully.") - return plans - - def _get_plan_filter_options(self): - """Return plan search options allowed by non-admin.""" - return CONF.query_plan_filters - - @validation.schema(plan_schema.create) - def create(self, req, body): - """Creates a new plan.""" - - LOG.debug('Create plan request body: %s', body) - context = req.environ['karbor.context'] - context.can(plan_policy.CREATE_POLICY) - plan = body['plan'] - LOG.debug('Create plan request plan: %s', plan) - context.notification = notification.KarborPlanCreate( - context, request=req) - - parameters = plan.get("parameters", None) - - self.validate_plan_resources(plan) - self.validate_plan_parameters(context, plan) - - resources = plan.get('resources', None) - if resources: - for resource in resources: - extra_info = resource.get('extra_info', None) - if extra_info is not None: - resource['extra_info'] = jsonutils.dumps(extra_info) - - plan_properties = { - 'name': plan.get('name', None), - 'description': plan.get('description', None), - 'provider_id': plan.get('provider_id', None), - 'project_id': context.project_id, - 'status': constants.PLAN_STATUS_SUSPENDED, - 'resources': resources, - 'parameters': parameters, - } - - try: - reserve_opts = {'plans': 1} - reservations = QUOTAS.reserve(context, **reserve_opts) - except exception.OverQuota as e: - quota.process_reserve_over_quota( - context, e, - resource='plans') - try: - plan = objects.Plan(context=context, **plan_properties) - with StartNotification( - context, name=plan.get('name', None)): - plan.create() - QUOTAS.commit(context, reservations) - except Exception: - with excutils.save_and_reraise_exception(): - try: - if plan and 'id' in plan: - plan.destroy() - finally: - 
QUOTAS.rollback(context, reservations) - - retval = self._view_builder.detail(req, plan) - - return retval - - @validation.schema(plan_schema.update) - def update(self, req, id, body): - """Update a plan.""" - context = req.environ['karbor.context'] - context.notification = notification.KarborPlanUpdate( - context, request=req) - - plan = body['plan'] - update_dict = {} - - valid_update_keys = { - 'name', - 'resources', - 'status', - 'description', - } - for key in valid_update_keys.intersection(plan): - update_dict[key] = plan[key] - - if not update_dict: - msg = _("Missing updated parameters in request body.") - raise exc.HTTPBadRequest(explanation=msg) - - if update_dict.get("resources"): - self.validate_plan_resources(update_dict) - - resources = update_dict.get('resources', None) - if resources: - for resource in resources: - extra_info = resource.get('extra_info', None) - if extra_info is not None: - resource['extra_info'] = jsonutils.dumps(extra_info) - - try: - plan = self._plan_get(context, id) - except exception.PlanNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - with StartNotification(context, id=id): - self._plan_update(context, plan, update_dict) - plan.update(update_dict) - - retval = self._view_builder.detail(req, plan) - return retval - - def _plan_get(self, context, plan_id): - if not uuidutils.is_uuid_like(plan_id): - msg = _("Invalid plan id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - plan = objects.Plan.get_by_id(context, plan_id) - try: - context.can(plan_policy.GET_POLICY, target_obj=plan) - except exception.PolicyNotAuthorized: - # raise PlanNotFound instead to make sure karbor behaves - # as it used to - raise exception.PlanNotFound(plan_id=plan_id) - LOG.info("Plan info retrieved successfully.", resource=plan) - return plan - - def _plan_update(self, context, plan, fields): - if plan['status'] != constants.PLAN_STATUS_SUSPENDED: - LOG.info("Unable to update plan, because it is in %s state.", - plan['status']) - msg = _("The plan can be only updated in suspended status.") - raise exception.InvalidPlan(reason=msg) - # TODO(chenying) replication scene: need call rpc API when - # the status of the plan is changed. 
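The create and delete paths above follow the usual OpenStack quota protocol: reserve a delta first, commit it once the database write succeeds, and roll it back on any failure. Schematically, under the assumption that this Quota class stands in for the real karbor.quota.QUOTAS:

class Quota(object):
    """Stand-in for karbor.quota.QUOTAS (illustrative only)."""

    def reserve(self, context, **deltas):
        # The real implementation checks limits and may raise OverQuota.
        return ['reservation-id']

    def commit(self, context, reservations):
        pass  # make the reserved usage permanent

    def rollback(self, context, reservations):
        pass  # release the reservation

QUOTAS = Quota()

def create_plan_with_quota(context, do_create):
    reservations = QUOTAS.reserve(context, plans=1)
    try:
        plan = do_create()
    except Exception:
        # Mirrors the excutils.save_and_reraise_exception() block above.
        QUOTAS.rollback(context, reservations)
        raise
    QUOTAS.commit(context, reservations)
    return plan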
-        context.can(plan_policy.UPDATE_POLICY, target_obj=plan)
-        if isinstance(plan, objects_base.KarborObject):
-            plan.update(fields)
-            plan.save()
-            LOG.info("Plan updated successfully.", resource=plan)
-        else:
-            msg = _("The parameter plan must be an object of "
-                    "KarborObject class.")
-            raise exception.InvalidInput(reason=msg)
-
-    def validate_plan_resources(self, plan):
-        resources_list = plan["resources"]
-        if (isinstance(resources_list, list)) and (len(resources_list) > 0):
-            for resource in resources_list:
-                if (isinstance(resource, dict) and (len(resource) >= 3) and
-                        {"id", "type", 'name'}.issubset(resource)):
-                    pass
-                else:
-                    msg = _("Resource in list must be a dict when creating a "
-                            "plan. The keys of resource are id, type and "
-                            "name.")
-                    raise exception.InvalidInput(reason=msg)
-        else:
-            msg = _("A list of resources must be provided when creating "
-                    "a plan.")
-            raise exception.InvalidInput(reason=msg)
-
-    def validate_plan_parameters(self, context, plan):
-        try:
-            provider = self.protection_api.show_provider(
-                context, plan["provider_id"])
-        except Exception:
-            msg = _("The provider could not be found.")
-            raise exc.HTTPBadRequest(explanation=msg)
-        options_schema = provider.get(
-            "extended_info_schema", {}).get("options_schema", None)
-        if options_schema is None:
-            msg = _("The options_schema of the plugin must be provided.")
-            raise exc.HTTPBadRequest(explanation=msg)
-        parameters = plan["parameters"]
-        if not parameters:
-            return
-        for resource_key, parameter_value in parameters.items():
-            if "#" in resource_key:
-                resource_type, resource_id = resource_key.split("#")
-                if not uuidutils.is_uuid_like(resource_id):
-                    msg = _("The resource_id must be a uuid.")
-                    raise exc.HTTPBadRequest(explanation=msg)
-            else:
-                resource_type = resource_key
-            if resource_type not in constants.RESOURCE_TYPES:
-                msg = _("The key of plan parameters is invalid.")
-                raise exc.HTTPBadRequest(explanation=msg)
-
-            if resource_type not in options_schema:
-                LOG.info("Found parameter for an unloaded resource type: %s",
-                         resource_type)
-                continue
-
-            properties = options_schema[resource_type]["properties"]
-            if not set(properties.keys()) >= set(parameter_value.keys()):
-                msg = _("The protect property of plan parameters "
-                        "is invalid.")
-                raise exc.HTTPBadRequest(explanation=msg)
-
-
-def create_resource():
-    return wsgi.Resource(PlansController())
diff --git a/karbor/api/v1/protectables.py b/karbor/api/v1/protectables.py
deleted file mode 100644
index 7fd2f6eb..00000000
--- a/karbor/api/v1/protectables.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
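validate_plan_parameters above accepts parameter keys either as a bare resource type or as "type#id", where the id half must be a UUID. The key handling, reduced to a standalone sketch (a plain regex stands in for oslo_utils.uuidutils.is_uuid_like, and RESOURCE_TYPES here is an illustrative subset of karbor.common.constants.RESOURCE_TYPES):

import re

_UUID_RE = re.compile(
    r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-'
    r'[0-9a-f]{4}-[0-9a-f]{12}$', re.IGNORECASE)

RESOURCE_TYPES = {'OS::Nova::Server', 'OS::Cinder::Volume'}

def parse_parameter_key(key):
    """Split 'type' or 'type#id' and validate both halves."""
    if '#' in key:
        resource_type, resource_id = key.split('#', 1)
        if not _UUID_RE.match(resource_id):
            raise ValueError("The resource_id must be a uuid.")
    else:
        resource_type = key
    if resource_type not in RESOURCE_TYPES:
        raise ValueError("The key of plan parameters is invalid.")
    return resource_type

# parse_parameter_key('OS::Nova::Server#6c2ab5ae-1c6e-4c3c-8e8f-7b0f3f8a2d11')
#     -> 'OS::Nova::Server'; a malformed id raises ValueError.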
- -"""The protectables api.""" -from oslo_config import cfg -from oslo_log import log as logging - -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor import exception -from karbor.i18n import _ - -from karbor.policies import protectables as protectable_policy -from karbor.services.protection import api as protection_api -from karbor import utils - -import six - -query_instance_filters_opts = [ - cfg.ListOpt( - 'query_instance_filters', - default=['status'], - help=( - "Instance filter options which non-admin user could use to " - "query instances. Default values are: ['status']" - ) - ), -] -CONF = cfg.CONF -CONF.register_opts(query_instance_filters_opts) -LOG = logging.getLogger(__name__) - - -class ProtectableViewBuilder(common.ViewBuilder): - """Model a server API response as a python dictionary.""" - - _collection_name = "protectables" - - def show(self, request, protectable_type): - """Detailed view of a single protectable_type.""" - protectable_type_ref = { - 'protectable_type': { - 'name': protectable_type.get('name'), - 'dependent_types': protectable_type.get('dependent_types'), - } - } - return protectable_type_ref - - def detail(self, request, instance): - """Detailed view of a single instance.""" - instance_ref = { - 'instance': { - 'id': instance.get('id'), - 'type': instance.get('type'), - 'name': instance.get('name'), - 'extra_info': instance.get('extra_info'), - 'dependent_resources': instance.get('dependent_resources'), - } - } - return instance_ref - - def detail_list(self, request, instances, instance_count=None): - """Detailed view of a list of instances.""" - return self._list_view(self.detail, request, instances, - instance_count, - 'instances') - - def _list_view(self, func, request, instances, instance_count, - coll_name=_collection_name): - """Provide a view for a list of instance. 
- - :param func: Function used to format the instance data - :param request: API request - :param instances: List of instances in dictionary format - :param instance_count: Length of the original list of instances - :param coll_name: Name of collection, used to generate the next link - for a pagination query - :returns: instance data in dictionary format - """ - instances_list = [func(request, instance)['instance'] - for instance in instances] - instances_links = self._get_collection_links(request, - instances, - coll_name, - instance_count) - instances_dict = { - "instances": instances_list - } - if instances_links: - instances_dict['instances_links'] = instances_links - - return instances_dict - - -class ProtectablesController(wsgi.Controller): - """The Protectables API controller for the OpenStack API.""" - - _view_builder_class = ProtectableViewBuilder - - def __init__(self): - self.protection_api = protection_api.API() - super(ProtectablesController, self).__init__() - - def show(self, req, id): - """Return data about the given protectable_type.""" - context = req.environ['karbor.context'] - protectable_type = id - - LOG.info("Show the information of a given protectable type: %s", - protectable_type) - - protectable_types = self._get_all(context) - - if protectable_type not in protectable_types: - msg = _("Invalid protectable type provided.") - raise exception.InvalidInput(reason=msg) - - context.can(protectable_policy.GET_POLICY) - try: - retval_protectable_type = self.protection_api.\ - show_protectable_type(context, protectable_type) - except exception.ProtectableTypeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - LOG.info("Show the protectable type information issued successfully.") - return self._view_builder.show(req, retval_protectable_type) - - def index(self, req): - """Returns a list of protectable_types, - - transformed through view builder. 
- """ - context = req.environ['karbor.context'] - LOG.info("Show protectable type list", context=context) - - protectable_types = self._get_all(context) - retval_protectable_types = { - "protectable_type": protectable_types - } - - LOG.info("Show protectable type list request issued successfully.") - return retval_protectable_types - - def _get_all(self, context): - context.can(protectable_policy.GET_ALL_POLICY) - - protectable_types = self.protection_api.list_protectable_types(context) - - LOG.info("Get all protectable types completed successfully.") - return protectable_types - - def instances_index(self, req, protectable_type): - """Return data about the given protectable_type.""" - context = req.environ['karbor.context'] - LOG.info("Show the instances of a given protectable type: %s", - protectable_type) - - params = req.params.copy() - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = params - utils.check_filters(filters) - parameters = filters.get("parameters", None) - - if parameters is not None: - if not isinstance(parameters, dict): - msg = _("The parameters must be a dict.") - raise exception.InvalidInput(reason=msg) - - utils.remove_invalid_filter_options( - context, - filters, - self._get_instance_filter_options()) - - protectable_types = self._get_all(context) - - if protectable_type not in protectable_types: - msg = _("Invalid protectable type provided.") - raise exception.InvalidInput(reason=msg) - - instances = self._instances_get_all( - context, protectable_type, marker, limit, - sort_keys=sort_keys, sort_dirs=sort_dirs, - filters=filters, offset=offset, parameters=parameters) - - for instance in instances: - protectable_id = instance.get("id") - instance["type"] = protectable_type - protectable_name = instance.get("name", None) - if protectable_id is None: - raise exception.InvalidProtectableInstance() - dependents = self.protection_api.list_protectable_dependents( - context, protectable_id, protectable_type, protectable_name) - instance["dependent_resources"] = dependents - - retval_instances = self._view_builder.detail_list(req, instances) - - return retval_instances - - def _instances_get_all(self, context, protectable_type, marker=None, - limit=None, sort_keys=None, sort_dirs=None, - filters=None, offset=None, parameters=None): - context.can(protectable_policy.INSTANCES_GET_ALL_POLICY) - - if filters is None: - filters = {} - - try: - if limit is not None: - limit = int(limit) - if limit <= 0: - msg = _('limit param must be positive') - raise exception.InvalidInput(reason=msg) - except ValueError: - msg = _('limit param must be an integer') - raise exception.InvalidInput(reason=msg) - - if filters: - LOG.debug("Searching by: %s.", six.text_type(filters)) - - instances = self.protection_api.list_protectable_instances( - context, protectable_type, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset, - parameters=parameters) - - LOG.info("Get all instances completed successfully.") - return instances - - def _get_instance_filter_options(self): - """Return instance search options allowed by non-admin.""" - return CONF.query_instance_filters - - def instances_show(self, req, protectable_type, protectable_id): - """Return a instance about the given protectable_type and id.""" - - context = req.environ['karbor.context'] - params = req.params.copy() - utils.check_filters(params) - parameters = params.get("parameters", None) - - LOG.info("Show the instance 
of a given protectable type: %s", - protectable_type) - - if parameters is not None: - if not isinstance(parameters, dict): - msg = _("The parameters must be a dict.") - raise exception.InvalidInput(reason=msg) - - protectable_types = self._get_all(context) - - if protectable_type not in protectable_types: - msg = _("Invalid protectable type provided.") - raise exception.InvalidInput(reason=msg) - - context.can(protectable_policy.INSTANCES_GET_POLICY) - try: - instance = self.protection_api.show_protectable_instance( - context, protectable_type, protectable_id, - parameters=parameters) - except exception.ProtectableResourceNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - except Exception as err: - raise exc.HTTPInternalServerError( - explanation=six.text_type(err)) - - if instance is None: - msg = _("The instance doesn't exist.") - raise exc.HTTPInternalServerError(explanation=msg) - - dependents = self.protection_api.list_protectable_dependents( - context, protectable_id, protectable_type, - instance.get("name", None)) - instance["dependent_resources"] = dependents - - retval_instance = self._view_builder.detail(req, instance) - return retval_instance - - -def create_resource(): - return wsgi.Resource(ProtectablesController()) diff --git a/karbor/api/v1/providers.py b/karbor/api/v1/providers.py deleted file mode 100644 index 39f19a12..00000000 --- a/karbor/api/v1/providers.py +++ /dev/null @@ -1,553 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The providers api.""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor.api.schemas import checkpoints as checkpoint_schema -from karbor.api import validation -from karbor.common import constants -from karbor.common import notification -from karbor.common.notification import StartNotification -from karbor import exception -from karbor.i18n import _ - -from karbor import objects -from karbor.policies import providers as provider_policy -from karbor import quota -from karbor.services.protection import api as protection_api -from karbor import utils - -import six - -query_provider_filters_opts = [ - cfg.ListOpt( - 'query_provider_filters', - default=['name', 'description'], - help=( - "Provider filter options which non-admin user could use to " - "query providers. Default values are: ['name', 'description']" - ) - ), -] -QUOTAS = quota.QUOTAS - -query_checkpoint_filters_opts = [ - cfg.ListOpt( - 'query_checkpoint_filters', - default=['project_id', 'plan_id', 'start_date', 'end_date'], - help=( - "Checkpoint filter options which non-admin user could use to " - "query checkpoints. 
Default values are: ['project_id', " - "'plan_id', 'start_date', 'end_date']" - ) - ), -] - -CONF = cfg.CONF -CONF.register_opts(query_provider_filters_opts) -CONF.register_opts(query_checkpoint_filters_opts) - -LOG = logging.getLogger(__name__) - - -class ProviderViewBuilder(common.ViewBuilder): - """Model a server API response as a python dictionary.""" - - _collection_name = "providers" - - def detail(self, request, provider): - """Detailed view of a single provider.""" - provider_ref = { - 'provider': { - 'id': provider.get('id'), - 'name': provider.get('name'), - 'description': provider.get('description'), - 'extended_info_schema': provider.get('extended_info_schema'), - } - } - return provider_ref - - def detail_list(self, request, providers, provider_count=None): - """Detailed view of a list of providers.""" - return self._list_view(self.detail, request, providers, - provider_count, - self._collection_name) - - def _list_view(self, func, request, providers, provider_count, - coll_name=_collection_name): - """Provide a view for a list of provider. - - :param func: Function used to format the provider data - :param request: API request - :param providers: List of providers in dictionary format - :param provider_count: Length of the original list of providers - :param coll_name: Name of collection, used to generate the next link - for a pagination query - :returns: Provider data in dictionary format - """ - providers_list = [func(request, provider)['provider'] - for provider in providers] - providers_links = self._get_collection_links(request, - providers, - coll_name, - provider_count) - providers_dict = { - "providers": providers_list - } - if providers_links: - providers_dict['providers_links'] = providers_links - - return providers_dict - - -class CheckpointViewBuilder(common.ViewBuilder): - """Model a server API response as a python dictionary.""" - - _collection_name = "checkpoints" - - def detail(self, request, checkpoint): - """Detailed view of a single checkpoint.""" - checkpoint_ref = { - 'checkpoint': { - 'id': checkpoint.get('id'), - 'project_id': checkpoint.get('project_id'), - 'status': checkpoint.get('status'), - 'protection_plan': checkpoint.get('protection_plan'), - 'resource_graph': checkpoint.get('resource_graph'), - 'created_at': checkpoint.get('created_at'), - 'extra_info': checkpoint.get('extra_info'), - } - } - return checkpoint_ref - - def detail_list(self, request, checkpoints, checkpoint_count=None): - """Detailed view of a list of checkpoints.""" - return self._list_view(self.detail, request, checkpoints, - checkpoint_count, - self._collection_name) - - def _list_view(self, func, request, checkpoints, checkpoint_count, - coll_name=_collection_name): - """Provide a view for a list of checkpoint. 
- - :param func: Function used to format the checkpoint data - :param request: API request - :param checkpoints: List of checkpoints in dictionary format - :param checkpoint_count: Length of the original list of checkpoints - :param coll_name: Name of collection, used to generate the next link - for a pagination query - :returns: Checkpoint data in dictionary format - """ - checkpoints_list = [func(request, checkpoint)['checkpoint'] - for checkpoint in checkpoints] - checkpoints_links = self._get_collection_links(request, - checkpoints, - coll_name, - checkpoint_count) - checkpoints_dict = { - "checkpoints": checkpoints_list - } - if checkpoints_links: - checkpoints_dict['checkpoints_links'] = checkpoints_links - - return checkpoints_dict - - -class ProvidersController(wsgi.Controller): - """The Providers API controller for the OpenStack API.""" - - _view_builder_class = ProviderViewBuilder - - def __init__(self): - self.protection_api = protection_api.API() - self._checkpoint_view_builder = CheckpointViewBuilder() - super(ProvidersController, self).__init__() - - def show(self, req, id): - """Return data about the given provider id.""" - context = req.environ['karbor.context'] - - LOG.info("Show provider with id: %s", id) - - try: - provider = self._provider_get(context, id) - except exception.ProviderNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - LOG.info("Show provider request issued successfully.", - resource={'id': provider.get("id")}) - return self._view_builder.detail(req, provider) - - def index(self, req): - """Returns a list of providers, transformed through view builder.""" - context = req.environ['karbor.context'] - - LOG.info("Show provider list", context=context) - - params = req.params.copy() - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = params - - utils.remove_invalid_filter_options( - context, - filters, - self._get_provider_filter_options()) - - utils.check_filters(filters) - providers = self._get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - - retval_providers = self._view_builder.detail_list(req, providers) - - LOG.info("Show provider list request issued successfully.") - - return retval_providers - - def _get_all(self, context, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, offset=None): - context.can(provider_policy.GET_ALL_POLICY) - - if filters is None: - filters = {} - - if filters: - LOG.debug("Searching by: %s.", six.text_type(filters)) - - providers = self.protection_api.list_providers( - context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - - LOG.info("Get all providers completed successfully.") - return providers - - def _get_provider_filter_options(self): - """Return provider search options allowed by non-admin.""" - return CONF.query_provider_filters - - def _get_checkpoint_filter_options(self): - """Return checkpoint search options allowed by non-admin.""" - return CONF.query_checkpoint_filters - - def _provider_get(self, context, provider_id): - if not uuidutils.is_uuid_like(provider_id): - msg = _("Invalid provider id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - try: - context.can(provider_policy.GET_POLICY) - except exception.PolicyNotAuthorized: - # raise ProviderNotFound instead to make sure karbor behaves - # as it used to - raise exception.ProviderNotFound(provider_id=provider_id) - - 
provider = self.protection_api.show_provider(context, provider_id)
-
-        LOG.info("Provider info retrieved successfully.")
-        return provider
-
-    def checkpoints_index(self, req, provider_id):
-        """Returns a list of checkpoints, transformed through view builder."""
-        context = req.environ['karbor.context']
-
-        LOG.info("Show checkpoints list. provider_id:%s", provider_id)
-
-        params = req.params.copy()
-        marker, limit, offset = common.get_pagination_params(params)
-        sort_keys, sort_dirs = common.get_sort_params(params)
-        filters = params
-
-        utils.remove_invalid_filter_options(
-            context,
-            filters,
-            self._get_checkpoint_filter_options())
-
-        utils.check_filters(filters)
-        checkpoints = self._checkpoints_get_all(
-            context, provider_id, marker, limit,
-            sort_keys=sort_keys, sort_dirs=sort_dirs,
-            filters=filters, offset=offset)
-
-        retval_checkpoints = self._checkpoint_view_builder.detail_list(
-            req, checkpoints)
-
-        LOG.info("Show checkpoints list request issued successfully.")
-        return retval_checkpoints
-
-    def _checkpoints_get_all(self, context, provider_id, marker=None,
-                             limit=None, sort_keys=None, sort_dirs=None,
-                             filters=None, offset=None):
-        context.can(provider_policy.CHECKPOINT_GET_ALL_POLICY)
-
-        if filters is None:
-            filters = {}
-        all_tenants = utils.get_bool_param(
-            'all_tenants', filters) and context.is_admin
-        try:
-            if limit is not None:
-                limit = int(limit)
-                if limit <= 0:
-                    msg = _('limit param must be positive')
-                    raise exception.InvalidInput(reason=msg)
-        except ValueError:
-            msg = _('limit param must be an integer')
-            raise exception.InvalidInput(reason=msg)
-
-        if filters:
-            LOG.debug("Searching by: %s.", six.text_type(filters))
-
-        if all_tenants:
-            del filters['all_tenants']
-        checkpoints = self.protection_api.list_checkpoints(
-            context, provider_id, marker, limit,
-            sort_keys=sort_keys,
-            sort_dirs=sort_dirs,
-            filters=filters,
-            offset=offset,
-            all_tenants=all_tenants
-        )
-
-        LOG.info("Get all checkpoints completed successfully.")
-        return checkpoints
-
-    @validation.schema(checkpoint_schema.create)
-    def checkpoints_create(self, req, provider_id, body):
-        """Creates a new checkpoint."""
-
-        context = req.environ['karbor.context']
-        context.notification = notification.KarborCheckpointCreate(
-            context, request=req)
-
-        LOG.debug('Create checkpoint request '
-                  'body: %s provider_id:%s', body, provider_id)
-
-        context.can(provider_policy.CHECKPOINT_CREATE_POLICY)
-        checkpoint = body['checkpoint']
-        LOG.debug('Create checkpoint request checkpoint: %s',
-                  checkpoint)
-
-        if not provider_id:
-            msg = _("provider_id must be provided when creating "
-                    "a checkpoint.")
-            raise exception.InvalidInput(reason=msg)
-
-        plan_id = checkpoint.get("plan_id")
-
-        plan = objects.Plan.get_by_id(context, plan_id)
-        if not plan:
-            raise exception.PlanNotFound(plan_id=plan_id)
-
-        # check the provider_id
-        if provider_id != plan.get("provider_id"):
-            msg = _("The parameter provider_id is not the same as "
-                    "the value in the plan.")
-            raise exception.InvalidPlan(reason=msg)
-
-        extra_info = checkpoint.get("extra_info", None)
-        if extra_info is not None:
-            if not isinstance(extra_info, dict):
-                msg = _("The extra_info in checkpoint must be a dict when "
-                        "creating a checkpoint.")
-                raise exception.InvalidInput(reason=msg)
-            elif not all(map(lambda s: isinstance(s, six.string_types),
-                             extra_info.keys())):
-                msg = _("Key of extra_info in checkpoint must be a string "
-                        "when creating a checkpoint.")
-                raise exception.InvalidInput(reason=msg)
-        else:
-            extra_info = {
-                'created_by':
constants.MANUAL - } - - checkpoint_extra_info = None - if extra_info is not None: - checkpoint_extra_info = jsonutils.dumps(extra_info) - checkpoint_properties = { - 'project_id': context.project_id, - 'status': constants.CHECKPOINT_STATUS_PROTECTING, - 'provider_id': provider_id, - "protection_plan": { - "id": plan.get("id"), - "name": plan.get("name"), - "resources": plan.get("resources"), - }, - "extra_info": checkpoint_extra_info - } - - try: - reserve_opts = {'checkpoints': 1} - reservations = QUOTAS.reserve(context, **reserve_opts) - except exception.OverQuota as e: - quota.process_reserve_over_quota( - context, e, - resource='checkpoints') - else: - checkpoint_id = None - try: - with StartNotification( - context, checkpoint_properties=checkpoint_properties): - checkpoint_id = self.protection_api.protect( - context, plan, checkpoint_properties) - QUOTAS.commit(context, reservations) - except Exception as error: - if not checkpoint_id: - QUOTAS.rollback(context, reservations) - msg = _("Create checkpoint failed: %s") % error - raise exc.HTTPBadRequest(explanation=msg) - - checkpoint_properties['id'] = checkpoint_id - - LOG.info("Create the checkpoint successfully. checkpoint_id:%s", - checkpoint_id) - returnval = self._checkpoint_view_builder.detail( - req, checkpoint_properties) - return returnval - - def checkpoints_show(self, req, provider_id, checkpoint_id): - """Return data about the given checkpoint id.""" - context = req.environ['karbor.context'] - - LOG.info("Show checkpoint with id: %s.", checkpoint_id) - LOG.info("provider_id: %s.", provider_id) - - try: - checkpoint = self._checkpoint_get(context, provider_id, - checkpoint_id) - except exception.CheckpointNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - except exception.AccessCheckpointNotAllowed as error: - raise exc.HTTPForbidden(explanation=error.msg) - - LOG.info("Show checkpoint request issued successfully.") - LOG.info("checkpoint: %s", checkpoint) - retval = self._checkpoint_view_builder.detail(req, checkpoint) - LOG.info("retval: %s", retval) - return retval - - def _checkpoint_get(self, context, provider_id, checkpoint_id): - if not uuidutils.is_uuid_like(provider_id): - msg = _("Invalid provider id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - if not uuidutils.is_uuid_like(checkpoint_id): - msg = _("Invalid checkpoint id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - try: - context.can(provider_policy.CHECKPOINT_GET_POLICY) - except exception.PolicyNotAuthorized: - # raise CheckpointNotFound instead to make sure karbor behaves - # as it used to - raise exception.CheckpointNotFound(checkpoint_id=checkpoint_id) - - checkpoint = self.protection_api.show_checkpoint( - context, provider_id, checkpoint_id) - - if checkpoint is None: - raise exception.CheckpointNotFound(checkpoint_id=checkpoint_id) - - LOG.info("Checkpoint info retrieved successfully.") - return checkpoint - - def checkpoints_delete(self, req, provider_id, checkpoint_id): - """Delete a checkpoint.""" - context = req.environ['karbor.context'] - context.can(provider_policy.CHECKPOINT_DELETE_POLICY) - context.notification = notification.KarborCheckpointDelete( - context, request=req) - - LOG.info("Delete checkpoint with id: %s.", checkpoint_id) - LOG.info("provider_id: %s.", provider_id) - try: - checkpoint = self._checkpoint_get(context, provider_id, - checkpoint_id) - except exception.CheckpointNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - except exception.AccessCheckpointNotAllowed as 
error: - raise exc.HTTPForbidden(explanation=error.msg) - project_id = checkpoint.get('project_id') - - try: - with StartNotification(context, checkpoint_id=checkpoint_id): - self.protection_api.delete(context, provider_id, checkpoint_id) - except exception.DeleteCheckpointNotAllowed as error: - raise exc.HTTPForbidden(explanation=error.msg) - - try: - reserve_opts = {'checkpoints': -1} - reservations = QUOTAS.reserve( - context, project_id=project_id, **reserve_opts) - except Exception: - LOG.exception("Failed to update usages after deleting checkpoint.") - else: - QUOTAS.commit(context, reservations, project_id=project_id) - - LOG.info("Delete checkpoint request issued successfully.") - return {} - - def _checkpoint_reset_state(self, context, provider_id, - checkpoint_id, state): - try: - self.protection_api.reset_state(context, provider_id, - checkpoint_id, state) - except exception.AccessCheckpointNotAllowed as error: - raise exc.HTTPForbidden(explanation=error.msg) - except exception.CheckpointNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - except exception.CheckpointNotBeReset as error: - raise exc.HTTPBadRequest(explanation=error.msg) - LOG.info("Reset checkpoint state request issued successfully.") - return {} - - @validation.schema(checkpoint_schema.update) - def checkpoints_update(self, req, provider_id, checkpoint_id, body): - """Reset a checkpoint's state.""" - context = req.environ['karbor.context'] - context.notification = notification.KarborCheckpointUpdate( - context, request=req) - - LOG.info("Reset checkpoint state with id: %s", checkpoint_id) - LOG.info("provider_id: %s.", provider_id) - - if not uuidutils.is_uuid_like(provider_id): - msg = _("Invalid provider id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - if not uuidutils.is_uuid_like(checkpoint_id): - msg = _("Invalid checkpoint id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - context.can(provider_policy.CHECKPOINT_UPDATE_POLICY) - - with StartNotification(context, checkpoint_id=checkpoint_id): - state = body["os-resetState"]["state"] - return self._checkpoint_reset_state( - context, provider_id, checkpoint_id, state) - - -def create_resource(): - return wsgi.Resource(ProvidersController()) diff --git a/karbor/api/v1/quota_classes.py b/karbor/api/v1/quota_classes.py deleted file mode 100644 index 39fb44c1..00000000 --- a/karbor/api/v1/quota_classes.py +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
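Editorially, the ProvidersController above exposes checkpoints strictly as a sub-collection of a provider, and checkpoints_update only accepts an os-resetState body. A minimal client-side sketch of that contract using requests follows; the endpoint, port (8799 is the conventional Karbor API port in devstack), token, UUIDs, and target state are illustrative placeholders, not values from this tree:

import requests

# Hypothetical deployment values -- placeholders only.
BASE = "http://controller:8799/v1/demo_project_id"
HEADERS = {"X-Auth-Token": "example-token",
           "Content-Type": "application/json"}
provider_id = "cf56bd3e-97a7-4078-b6d5-f36246333fd9"  # placeholder UUID

# POST /{project_id}/providers/{provider_id}/checkpoints -> checkpoints_create
body = {"checkpoint": {"plan_id": "2a9ce1f3-cc1a-4516-9435-0ebb13caa398",
                       "extra_info": {"created_by": "manual"}}}
resp = requests.post(BASE + "/providers/%s/checkpoints" % provider_id,
                     json=body, headers=HEADERS)
checkpoint_id = resp.json()["checkpoint"]["id"]

# PUT .../checkpoints/{checkpoint_id} -> checkpoints_update resets state.
requests.put(BASE + "/providers/%s/checkpoints/%s" % (provider_id,
                                                      checkpoint_id),
             json={"os-resetState": {"state": "error"}}, headers=HEADERS)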
- -"""The Quota Class api.""" - -from oslo_config import cfg -from oslo_log import log as logging - -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor.api.schemas import quota_classes as quota_class_schema -from karbor.api import validation -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor.policies import quota_classes as quota_class_policy - -from karbor import quota - - -QUOTAS = quota.QUOTAS -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -class QuotaClassesViewBuilder(common.ViewBuilder): - """Model a Quota Class API response as a python dictionary.""" - - _collection_name = "quota_class" - - def detail_list(self, request, quota, quota_class=None): - """Detailed view of a single quota class.""" - keys = ( - 'plans', - 'checkpoints', - ) - view = {key: quota.get(key) for key in keys} - if quota_class: - view['id'] = quota_class - return {self._collection_name: view} - - -class QuotaClassesController(wsgi.Controller): - """The Quota Class API controller for the OpenStack API.""" - - _view_builder_class = QuotaClassesViewBuilder - - def __init__(self): - super(QuotaClassesController, self).__init__() - - def show(self, req, id): - """Return data about the given quota class id.""" - context = req.environ['karbor.context'] - LOG.debug("Show quota class with name: %s", id, context=context) - quota_class_name = id - context.can(quota_class_policy.GET_POLICY) - try: - quota_class = QUOTAS.get_class_quotas(context, - quota_class_name) - except exception.NotAuthorized: - raise exc.HTTPForbidden() - - LOG.debug("Show quota class request issued successfully.", - resource={'id': id}) - return self._view_builder.detail_list(req, quota_class, - quota_class_name) - - @validation.schema(quota_class_schema.update) - def update(self, req, id, body): - context = req.environ['karbor.context'] - - LOG.info("Update quota class with name: %s", id, - context=context) - context.can(quota_class_policy.UPDATE_POLICY) - - quota_class_name = id - bad_keys = [] - for key, value in body.get('quota_class', {}).items(): - if key not in QUOTAS: - bad_keys.append(key) - continue - if key in QUOTAS and value: - try: - value = int(value) - except (ValueError, TypeError): - msg = _("Quota '%(value)s' for %(key)s should be " - "integer.") % {'value': value, 'key': key} - LOG.warning(msg) - raise exc.HTTPBadRequest(explanation=msg) - - for key in body['quota_class'].keys(): - if key in QUOTAS: - value = int(body['quota_class'][key]) - self._validate_quota_limit(value) - try: - db.quota_class_update( - context, quota_class_name, key, value) - except exception.QuotaClassNotFound: - db.quota_class_create( - context, quota_class_name, key, value) - except exception.AdminRequired: - raise exc.HTTPForbidden() - - LOG.info("Update quota class successfully.", - resource={'id': quota_class_name}) - quota_class = QUOTAS.get_class_quotas(context, id) - return self._view_builder.detail_list(req, quota_class) - - def _validate_quota_limit(self, limit): - # NOTE: -1 is a flag value for unlimited - if limit < -1: - msg = _("Quota limit must be -1 or greater.") - raise exc.HTTPBadRequest(explanation=msg) - - -def create_resource(): - return wsgi.Resource(QuotaClassesController()) diff --git a/karbor/api/v1/quotas.py b/karbor/api/v1/quotas.py deleted file mode 100644 index 01d9e33e..00000000 --- a/karbor/api/v1/quotas.py +++ /dev/null @@ -1,192 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not 
use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The Quotas api.""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import uuidutils - -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor.api.schemas import quotas as quota_schema -from karbor.api import validation -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor.policies import quotas as quota_policy - -from karbor import quota - - -QUOTAS = quota.QUOTAS -NON_QUOTA_KEYS = ['tenant_id', 'id'] -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -class QuotasViewBuilder(common.ViewBuilder): - """Model a Quotas API response as a python dictionary.""" - - _collection_name = "quota" - - def detail_list(self, request, quota, project_id=None): - """Detailed view of a single quota.""" - keys = ( - 'plans', - 'checkpoints', - ) - view = {key: quota.get(key) for key in keys} - if project_id: - view['id'] = project_id - return {self._collection_name: view} - - -class QuotasController(wsgi.Controller): - """The Quotas API controller for the OpenStack API.""" - - _view_builder_class = QuotasViewBuilder - - def __init__(self): - super(QuotasController, self).__init__() - - def show(self, req, id): - """Return data about the given quota id.""" - context = req.environ['karbor.context'] - LOG.info("Show quotas with id: %s", id, context=context) - - if not uuidutils.is_uuid_like(id): - msg = _("Invalid project id provided.") - raise exc.HTTPBadRequest(explanation=msg) - context.can(quota_policy.GET_POLICY) - try: - db.authorize_project_context(context, id) - quota = self._get_quotas(context, id, usages=False) - except exception.NotAuthorized: - raise exc.HTTPForbidden() - - LOG.info("Show quotas request issued successfully.", - resource={'id': id}) - return self._view_builder.detail_list(req, quota, id) - - def detail(self, req, id): - """Return data about the given quota.""" - context = req.environ['karbor.context'] - LOG.info("Show quotas detail with id: %s", id, context=context) - - if not uuidutils.is_uuid_like(id): - msg = _("Invalid project id provided.") - raise exc.HTTPBadRequest(explanation=msg) - context.can(quota_policy.GET_POLICY) - try: - db.authorize_project_context(context, id) - quota = self._get_quotas(context, id, usages=True) - except exception.NotAuthorized: - raise exc.HTTPForbidden() - - LOG.info("Show quotas detail successfully.", - resource={'id': id}) - return self._view_builder.detail_list(req, quota, id) - - def defaults(self, req, id): - """Return data about the given quotas.""" - context = req.environ['karbor.context'] - - LOG.info("Show quotas defaults with id: %s", id, - context=context) - - if not uuidutils.is_uuid_like(id): - msg = _("Invalid project id provided.") - raise exc.HTTPBadRequest(explanation=msg) - context.can(quota_policy.GET_DEFAULT_POLICY) - quotas = QUOTAS.get_defaults(context) - - LOG.info("Show quotas defaults successfully.", - resource={'id': id}) - return self._view_builder.detail_list(req, quotas, id) - - @validation.schema(quota_schema.update) - 
def update(self, req, id, body): - context = req.environ['karbor.context'] - - LOG.info("Update quotas with id: %s", id, - context=context) - - if not uuidutils.is_uuid_like(id): - msg = _("Invalid project id provided.") - raise exc.HTTPBadRequest(explanation=msg) - context.can(quota_policy.UPDATE_POLICY) - - project_id = id - bad_keys = [] - for key, value in body.get('quota', {}).items(): - if (key not in QUOTAS and key not in - NON_QUOTA_KEYS): - bad_keys.append(key) - continue - if key not in NON_QUOTA_KEYS and value: - try: - value = int(value) - except (ValueError, TypeError): - msg = _("Quota '%(value)s' for %(key)s should be " - "an integer.") % {'value': value, 'key': key} - LOG.warning(msg) - raise exc.HTTPBadRequest(explanation=msg) - - for key in body['quota'].keys(): - if key in QUOTAS: - value = int(body['quota'][key]) - self._validate_quota_limit(value) - try: - db.quota_update(context, project_id, key, value) - except exception.ProjectQuotaNotFound: - db.quota_create(context, project_id, key, value) - - LOG.info("Update quotas successfully.", - resource={'id': project_id}) - return self._view_builder.detail_list( - req, self._get_quotas(context, id)) - - def _validate_quota_limit(self, limit): - # NOTE: -1 is a flag value for unlimited - if limit < -1: - msg = _("Quota limit must be -1 or greater.") - raise exc.HTTPBadRequest(explanation=msg) - - def _get_quotas(self, context, id, usages=False): - values = QUOTAS.get_project_quotas(context, id, usages=usages) - - if usages: - return values - else: - return dict((k, v['limit']) for k, v in values.items()) - - def delete(self, req, id): - context = req.environ['karbor.context'] - LOG.info("Delete quotas with id: %s", id, - context=context) - - if not uuidutils.is_uuid_like(id): - msg = _("Invalid project id provided.") - raise exc.HTTPBadRequest(explanation=msg) - context.can(quota_policy.DELETE_POLICY) - QUOTAS.destroy_all_by_project(context, id) - - LOG.info("Delete quotas successfully.", - resource={'id': id}) - - -def create_resource(): - return wsgi.Resource(QuotasController()) diff --git a/karbor/api/v1/restores.py b/karbor/api/v1/restores.py deleted file mode 100644 index 1a377fbf..00000000 --- a/karbor/api/v1/restores.py +++ /dev/null @@ -1,279 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
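For context, the update path in QuotasController above expects a body keyed by "quota", coerces each known quota key to an int, and treats -1 as the unlimited sentinel. A sketch of a conforming request body and the limit check it must pass; this is a standalone reimplementation for illustration, not code from this tree:

def validate_quota_limit(limit):
    # Mirrors QuotasController._validate_quota_limit: -1 means unlimited.
    if limit < -1:
        raise ValueError("Quota limit must be -1 or greater.")


# A body that would pass update(): known quota keys only, integer values.
payload = {"quota": {"plans": 50, "checkpoints": -1}}
for key, value in payload["quota"].items():
    validate_quota_limit(int(value))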
- -"""The restores api.""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import uuidutils - -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor.api.schemas import restores as restore_schema -from karbor.api import validation -from karbor.common import constants -from karbor.common import notification -from karbor.common.notification import StartNotification -from karbor import exception -from karbor.i18n import _ - -from karbor import objects -from karbor.objects import base as objects_base -from karbor.policies import restores as restore_policy -from karbor.services.protection import api as protection_api -from karbor import utils - -import six - -query_restore_filters_opt = cfg.ListOpt( - 'query_restore_filters', - default=['status'], - help="Restore filter options which " - "non-admin user could use to " - "query restores. Default values " - "are: ['status']") -CONF = cfg.CONF -CONF.register_opt(query_restore_filters_opt) - -LOG = logging.getLogger(__name__) - - -class RestoreViewBuilder(common.ViewBuilder): - """Model a server API response as a python dictionary.""" - - _collection_name = "restores" - - def detail(self, request, restore): - """Detailed view of a single restore.""" - restore_ref = { - 'restore': { - 'id': restore.get('id'), - 'project_id': restore.get('project_id'), - 'provider_id': restore.get('provider_id'), - 'checkpoint_id': restore.get('checkpoint_id'), - 'restore_target': restore.get('restore_target'), - 'parameters': restore.get('parameters'), - 'status': restore.get('status'), - 'resources_status': restore.get('resources_status'), - 'resources_reason': restore.get('resources_reason'), - } - } - return restore_ref - - def detail_list(self, request, restores, restore_count=None): - """Detailed view of a list of restores.""" - return self._list_view(self.detail, request, restores, - restore_count, - self._collection_name) - - def _list_view(self, func, request, restores, restore_count, - coll_name=_collection_name): - """Provide a view for a list of restores. 
- - :param func: Function used to format the restore data - :param request: API request - :param restores: List of restores in dictionary format - :param restore_count: Length of the original list of restores - :param coll_name: Name of collection, used to generate the next link - for a pagination query - :returns: restore data in dictionary format - """ - restores_list = [func(request, restore)['restore'] - for restore in restores] - restores_links = self._get_collection_links(request, - restores, - coll_name, - restore_count) - restores_dict = { - 'restores': restores_list - } - if restores_links: - restores_dict['restores_links'] = restores_links - - return restores_dict - - -class RestoresController(wsgi.Controller): - """The Restores API controller for the OpenStack API.""" - - _view_builder_class = RestoreViewBuilder - - def __init__(self): - self.protection_api = protection_api.API() - super(RestoresController, self).__init__() - - def show(self, req, id): - """Return data about the given restore.""" - context = req.environ['karbor.context'] - - LOG.info("Show restore with id: %s", id, context=context) - - if not uuidutils.is_uuid_like(id): - msg = _("Invalid restore id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - try: - restore = self._restore_get(context, id) - except exception.RestoreNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - LOG.info("Show restore request issued successfully.", - resource={'id': restore.id}) - return self._view_builder.detail(req, restore) - - def index(self, req): - """Returns a list of restores, transformed through view builder.""" - context = req.environ['karbor.context'] - - LOG.info("Show restore list", context=context) - - params = req.params.copy() - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = params - - utils.remove_invalid_filter_options( - context, - filters, - self._get_restore_filter_options()) - - utils.check_filters(filters) - restores = self._get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - - retval_restores = self._view_builder.detail_list(req, restores) - - LOG.info("Show restore list request issued successfully.") - - return retval_restores - - def _get_all(self, context, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, offset=None): - context.can(restore_policy.GET_ALL_POLICY) - - if filters is None: - filters = {} - - all_tenants = utils.get_bool_param('all_tenants', filters) - - if filters: - LOG.debug("Searching by: %s.", six.text_type(filters)) - - if context.is_admin and all_tenants: - # Need to remove all_tenants to pass the filtering below. 
- del filters['all_tenants'] - restores = objects.RestoreList.get_all( - context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - else: - restores = objects.RestoreList.get_all_by_project( - context, context.project_id, marker, limit, - sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, - offset=offset) - - LOG.info("Get all restores completed successfully.") - return restores - - def _get_restore_filter_options(self): - """Return restores search options allowed by non-admin.""" - return CONF.query_restore_filters - - @validation.schema(restore_schema.create) - def create(self, req, body): - """Creates a new restore.""" - - LOG.debug('Create restore request body: %s', body) - context = req.environ['karbor.context'] - context.can(restore_policy.CREATE_POLICY) - context.notification = notification.KarborRestoreCreate( - context, request=req) - restore = body['restore'] - LOG.debug('Create restore request : %s', restore) - - parameters = restore.get("parameters") - restore_auth = restore.get("restore_auth", None) - restore_properties = { - 'project_id': context.project_id, - 'provider_id': restore.get('provider_id'), - 'checkpoint_id': restore.get('checkpoint_id'), - 'restore_target': restore.get('restore_target'), - 'parameters': parameters, - 'status': constants.RESTORE_STATUS_IN_PROGRESS, - } - - restoreobj = objects.Restore(context=context, - **restore_properties) - restoreobj.create() - LOG.debug('call restore RPC : restoreobj:%s', restoreobj) - - # call restore rpc API of protection service - try: - with StartNotification(context, parameters=parameters): - self.protection_api.restore(context, restoreobj, restore_auth) - except exception.AccessCheckpointNotAllowed as error: - raise exc.HTTPForbidden(explanation=error.msg) - except Exception: - # update the status of restore - update_dict = { - "status": constants.RESTORE_STATUS_FAILURE - } - context.can(restore_policy.UPDATE_POLICY, restoreobj) - restoreobj = self._restore_update(context, - restoreobj.get("id"), - update_dict) - - retval = self._view_builder.detail(req, restoreobj) - - return retval - - def _restore_get(self, context, restore_id): - if not uuidutils.is_uuid_like(restore_id): - msg = _("Invalid restore id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - restore = objects.Restore.get_by_id(context, restore_id) - try: - context.can(restore_policy.GET_POLICY, restore) - except exception.PolicyNotAuthorized: - # raise RestoreNotFound instead to make sure karbor behaves - # as it used to - raise exception.RestoreNotFound(restore_id=restore_id) - LOG.info("Restore info retrieved successfully.") - return restore - - def _restore_update(self, context, restore_id, fields): - try: - restore = self._restore_get(context, restore_id) - except exception.RestoreNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - if isinstance(restore, objects_base.KarborObject): - restore.update(fields) - restore.save() - LOG.info("Restore updated successfully.") - return restore - else: - msg = _("The parameter restore must be an object of " - "KarborObject class.") - raise exception.InvalidInput(reason=msg) - - -def create_resource(): - return wsgi.Resource(RestoresController()) diff --git a/karbor/api/v1/router.py b/karbor/api/v1/router.py deleted file mode 100644 index 4838289c..00000000 --- a/karbor/api/v1/router.py +++ /dev/null @@ -1,149 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance 
with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_service import wsgi as base_wsgi - -from karbor.api.openstack import ProjectMapper -from karbor.api.v1 import copies -from karbor.api.v1 import operation_logs -from karbor.api.v1 import plans -from karbor.api.v1 import protectables -from karbor.api.v1 import providers -from karbor.api.v1 import quota_classes -from karbor.api.v1 import quotas -from karbor.api.v1 import restores -from karbor.api.v1 import scheduled_operations -from karbor.api.v1 import services -from karbor.api.v1 import triggers -from karbor.api.v1 import verifications - - -class APIRouter(base_wsgi.Router): - @classmethod - def factory(cls, global_conf, **local_conf): - return cls(ProjectMapper()) - - def __init__(self, mapper): - plans_resources = plans.create_resource() - restores_resources = restores.create_resource() - protectables_resources = protectables.create_resource() - providers_resources = providers.create_resource() - trigger_resources = triggers.create_resource() - scheduled_operation_resources = scheduled_operations.create_resource() - operation_log_resources = operation_logs.create_resource() - verification_resources = verifications.create_resource() - service_resources = services.create_resource() - quota_resources = quotas.create_resource() - quota_class_resources = quota_classes.create_resource() - copy_resources = copies.create_resource() - - mapper.resource("plan", "plans", - controller=plans_resources, - collection={}, - member={'action': 'POST'}) - mapper.resource("restore", "restores", - controller=restores_resources, - collection={}, - member={'action': 'POST'}) - mapper.resource("protectable", "protectables", - controller=protectables_resources, - collection={}, - member={}) - mapper.connect("protectable", - "/{project_id}/protectables/" - "{protectable_type}/instances", - controller=protectables_resources, - action='instances_index', - conditions={"method": ['GET']}) - mapper.connect("protectable", - "/{project_id}/protectables/" - "{protectable_type}/instances/{protectable_id}", - controller=protectables_resources, - action='instances_show', - conditions={"method": ['GET']}) - mapper.resource("provider", "providers", - controller=providers_resources, - collection={}, - member={}) - mapper.connect("provider", - "/{project_id}/providers/{provider_id}/checkpoints", - controller=providers_resources, - action='checkpoints_index', - conditions={"method": ['GET']}) - mapper.connect("provider", - "/{project_id}/providers/{provider_id}/checkpoints", - controller=providers_resources, - action='checkpoints_create', - conditions={"method": ['POST']}) - mapper.connect("provider", - "/{project_id}/providers/{provider_id}/checkpoints/" - "{checkpoint_id}", - controller=providers_resources, - action='checkpoints_show', - conditions={"method": ['GET']}) - mapper.connect("provider", - "/{project_id}/providers/{provider_id}/checkpoints/" - "{checkpoint_id}", - controller=providers_resources, - action='checkpoints_delete', - conditions={"method": ['DELETE']}) - mapper.connect("provider", - "/{project_id}/providers/{provider_id}/checkpoints/" - "{checkpoint_id}", - 
controller=providers_resources, - action='checkpoints_update', - conditions={'method': ['PUT']}) - mapper.resource("trigger", "triggers", - controller=trigger_resources, - collection={}, - member={'action': 'POST'}) - mapper.resource("scheduled_operation", "scheduled_operations", - controller=scheduled_operation_resources, - collection={}, - member={'action': 'POST'}) - mapper.resource("operation_log", "operation_logs", - controller=operation_log_resources, - collection={}, - member={}) - mapper.resource("verification", "verifications", - controller=verification_resources, - collection={}, - member={'action': 'POST'}) - mapper.resource("os-service", "os-services", - controller=service_resources, - collection={}, - member={'action': 'POST'}) - mapper.resource("quota", "quotas", - controller=quota_resources, - collection={}, - member={'action': 'POST'}) - mapper.connect("quota", - "/{project_id}/quotas/{id}/defaults", - controller=quota_resources, - action='defaults', - conditions={"method": ['GET']}) - mapper.connect("quota", - "/{project_id}/quotas/{id}/detail", - controller=quota_resources, - action='detail', - conditions={"method": ['GET']}) - mapper.resource("quota_class", "quota_classes", - controller=quota_class_resources, - collection={}, - member={'action': 'POST'}) - mapper.connect("copy", - "/{project_id}/providers/{provider_id}/checkpoints/" - "action", - controller=copy_resources, - action='create', - conditions={"method": ['POST']}) - super(APIRouter, self).__init__(mapper) diff --git a/karbor/api/v1/scheduled_operations.py b/karbor/api/v1/scheduled_operations.py deleted file mode 100644 index 9cc71955..00000000 --- a/karbor/api/v1/scheduled_operations.py +++ /dev/null @@ -1,264 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
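As a reading aid, the APIRouter above nests every collection under a per-project prefix, so the non-obvious checkpoint and quota routes come out as listed below. This sketch enumerates the effective method/path pairs registered by the mapper.connect() calls; the paths are taken from the code above, while the listing itself is editorial:

# (method, path) pairs produced by the mapper.connect() calls above.
EXTRA_ROUTES = [
    ("GET", "/{project_id}/providers/{provider_id}/checkpoints"),
    ("POST", "/{project_id}/providers/{provider_id}/checkpoints"),
    ("GET", "/{project_id}/providers/{provider_id}/checkpoints/{checkpoint_id}"),
    ("DELETE", "/{project_id}/providers/{provider_id}/checkpoints/{checkpoint_id}"),
    ("PUT", "/{project_id}/providers/{provider_id}/checkpoints/{checkpoint_id}"),
    ("GET", "/{project_id}/quotas/{id}/defaults"),
    ("GET", "/{project_id}/quotas/{id}/detail"),
    ("POST", "/{project_id}/providers/{provider_id}/checkpoints/action"),  # copies
]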
- -"""The scheduled operations api.""" - -from oslo_log import log as logging -from oslo_utils import uuidutils -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor.api.schemas import scheduled_operations as \ - scheduled_operation_schema -from karbor.api import validation -from karbor.common import notification -from karbor.common.notification import StartNotification -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.policies import scheduled_operations as scheduled_operation_policy -from karbor.services.operationengine import api as operationengine_api -from karbor import utils - -LOG = logging.getLogger(__name__) - - -class ScheduledOperationViewBuilder(common.ViewBuilder): - """Model a server API response as a python dictionary.""" - - _collection_name = "scheduled_operations" - - def detail(self, request, operation): - """Detailed view of a single scheduled operation.""" - - operation_ref = { - 'scheduled_operation': { - 'id': operation.get('id'), - 'name': operation.get('name'), - 'description': operation.get('description'), - 'operation_type': operation.get('operation_type'), - 'trigger_id': operation.get('trigger_id'), - 'operation_definition': operation.get('operation_definition'), - 'enabled': operation.get('enabled'), - } - } - return operation_ref - - def detail_list(self, request, operations): - """Detailed view of a list of operations.""" - return self._list_view(self.detail, request, operations) - - def _list_view(self, func, request, operations): - operations_list = [func(request, item)['scheduled_operation'] - for item in operations] - - operations_links = self._get_collection_links(request, - operations, - self._collection_name, - ) - ret = {'operations': operations_list} - if operations_links: - ret['operations_links'] = operations_links - - return ret - - -class ScheduledOperationController(wsgi.Controller): - """The Scheduled Operation API controller for the OpenStack API.""" - - _view_builder_class = ScheduledOperationViewBuilder - - def __init__(self): - self.operationengine_api = operationengine_api.API() - super(ScheduledOperationController, self).__init__() - - @validation.schema(scheduled_operation_schema.create) - def create(self, req, body): - """Creates a new scheduled operation.""" - - LOG.debug('Create scheduled operation start') - - LOG.debug('Create a scheduled operation, request body: %s', body) - - context = req.environ['karbor.context'] - context.can(scheduled_operation_policy.CREATE_POLICY) - context.notification = notification.KarborScheduledOpsCreate( - context, request=req) - operation_info = body['scheduled_operation'] - - name = operation_info.get("name", None) - operation_type = operation_info.get("operation_type", None) - operation_definition = operation_info.get( - "operation_definition", None) - if not all([name, operation_type, operation_definition]): - msg = _("Operation name or type or definition is not provided.") - raise exc.HTTPBadRequest(explanation=msg) - - trigger_id = operation_info.get("trigger_id", None) - trigger = self._get_trigger_by_id(context, trigger_id) - if context.project_id != trigger.project_id: - msg = _("Invalid trigger id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - operation_obj = { - 'name': operation_info.get('name', None), - 'description': operation_info.get('description', None), - 'operation_type': operation_type, - 'user_id': context.user_id, - 'project_id': context.project_id, - 'trigger_id': 
trigger_id, - 'operation_definition': operation_definition, - } - try: - operation = objects.ScheduledOperation(context=context, - **operation_obj) - operation.create() - except Exception as ex: - self._raise_unknown_exception(ex) - - try: - with StartNotification(context, operation_obj=operation_obj): - self._create_scheduled_operation(context, operation) - except Exception: - try: - operation.destroy() - except Exception: - pass - - raise - - return self._view_builder.detail(req, operation) - - def delete(self, req, id): - """Delete a scheduled operation.""" - - LOG.debug('Delete scheduled operation(%s) start', id) - - context = req.environ['karbor.context'] - context.notification = notification.KarborScheduledOpsDelete( - context, request=req) - operation = self._get_operation_by_id(context, id, ['trigger']) - trigger = operation.trigger - - context.can(scheduled_operation_policy.DELETE_POLICY, operation) - - try: - with StartNotification(context, id=id): - self.operationengine_api.delete_scheduled_operation( - context, id, trigger.id) - - except (exception.ScheduledOperationStateNotFound, - exception.TriggerNotFound, - Exception) as ex: - self._raise_unknown_exception(ex) - - operation.destroy() - - def show(self, req, id): - """Return data about the given operation.""" - - LOG.debug('Get scheduled operation(%s) start', id) - - context = req.environ['karbor.context'] - operation = self._get_operation_by_id(context, id) - context.can(scheduled_operation_policy.GET_POLICY, operation) - - return self._view_builder.detail(req, operation) - - def index(self, req): - """Returns a list of operations, transformed through view builder.""" - - context = req.environ['karbor.context'] - context.can(scheduled_operation_policy.GET_ALL_POLICY) - - params = req.params.copy() - LOG.debug('List scheduled operation start, params=%s', params) - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = params - - valid_filters = ["all_tenants", "name", "operation_type", - "trigger_id", "operation_definition"] - utils.remove_invalid_filter_options(context, filters, valid_filters) - utils.check_filters(filters) - - all_tenants = utils.get_bool_param("all_tenants", filters) - if not (context.is_admin and all_tenants): - filters["project_id"] = context.project_id - - try: - operations = objects.ScheduledOperationList.get_by_filters( - context, filters, limit, marker, sort_keys, sort_dirs) - except Exception as ex: - self._raise_unknown_exception(ex) - - return self._view_builder.detail_list(req, operations) - - def _get_operation_by_id(self, context, id, expect_attrs=[]): - if not uuidutils.is_uuid_like(id): - msg = _("Invalid operation id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - try: - operation = objects.ScheduledOperation.get_by_id( - context, id, expect_attrs) - except exception.ScheduledOperationNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - except Exception as ex: - self._raise_unknown_exception(ex) - - return operation - - def _get_trigger_by_id(self, context, trigger_id): - if not uuidutils.is_uuid_like(trigger_id): - msg = _("Invalid trigger id provided.") - raise exc.HTTPBadRequest(explanation=msg) - try: - trigger = objects.Trigger.get_by_id(context, trigger_id) - except exception.NotFound as ex: - raise exc.HTTPNotFound(explanation=ex.msg) - except Exception as ex: - self._raise_unknown_exception(ex) - - return trigger - - def _create_scheduled_operation(self, context, operation): - try: 
- self.operationengine_api.create_scheduled_operation( - context, operation) - - except (exception.InvalidInput, - exception.ScheduledOperationExist, - exception.TriggerIsInvalid, - exception.InvalidOperationDefinition) as ex: - raise exc.HTTPBadRequest(explanation=ex.msg) - - except (exception.TriggerNotFound, - exception.AuthorizationFailure, - Exception) as ex: - self._raise_unknown_exception(ex) - - def _raise_unknown_exception(self, exception_instance): - LOG.exception('An unknown exception happened') - - value = exception_instance.msg if isinstance( - exception_instance, exception.KarborException) else type( - exception_instance) - msg = (_('Unexpected API Error. Please report this at ' - 'http://bugs.launchpad.net/karbor/ and attach the ' - 'Karbor API log if possible.\n%s') % value) - raise exc.HTTPInternalServerError(explanation=msg) - - -def create_resource(): - return wsgi.Resource(ScheduledOperationController()) diff --git a/karbor/api/v1/services.py b/karbor/api/v1/services.py deleted file mode 100644 index 192d9d7b..00000000 --- a/karbor/api/v1/services.py +++ /dev/null @@ -1,128 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The service management api.""" -from oslo_log import log as logging -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.policies import services as service_policy -from karbor import utils - -LOG = logging.getLogger(__name__) - -SERVICES_CAN_BE_UPDATED = ['karbor-operationengine'] - - -class ServiceViewBuilder(common.ViewBuilder): - """Model a server API response as a python dictionary.""" - - _collection_name = "services" - - def detail(self, request, service): - """Detailed view of a single service.""" - service_ref = { - 'service': { - 'id': service.get('id'), - 'binary': service.get('binary'), - 'host': service.get('host'), - 'status': 'disabled' if service.get('disabled') else 'enabled', - 'state': 'up' if utils.service_is_up(service) else 'down', - 'updated_at': service.get('updated_at'), - 'disabled_reason': service.get('disabled_reason') - } - } - return service_ref - - def detail_list(self, request, services, service_count=None): - """Detailed view of a list of services.""" - return self._list_view(self.detail, request, services) - - def _list_view(self, func, request, services): - """Provide a view for a list of service. 
- - :param func: Function used to format the service data - :param request: API request - :param services: List of services in dictionary format - :returns: Service data in dictionary format - """ - services_list = [func(request, service)['service'] - for service in services] - services_dict = { - "services": services_list - } - - return services_dict - - -class ServiceController(wsgi.Controller): - """The Service Management API controller for the OpenStack API.""" - - _view_builder_class = ServiceViewBuilder - - def __init__(self): - super(ServiceController, self).__init__() - - def index(self, req): - """Returns a list of services - - transformed through view builder. - """ - context = req.environ['karbor.context'] - context.can(service_policy.GET_ALL_POLICY) - host = req.GET['host'] if 'host' in req.GET else None - binary = req.GET['binary'] if 'binary' in req.GET else None - try: - services = objects.ServiceList.get_all_by_args( - context, host, binary) - except Exception as e: - msg = (_('List service failed, reason: %s') % e) - raise exc.HTTPBadRequest(explanation=msg) - return self._view_builder.detail_list(req, services) - - def update(self, req, id, body): - """Enable/Disable scheduling for a service""" - - context = req.environ['karbor.context'] - context.can(service_policy.UPDATE_POLICY) - try: - service = objects.Service.get_by_id(context, id) - except exception.ServiceNotFound as e: - raise exc.HTTPNotFound(explanation=e.message) - - if service.binary not in SERVICES_CAN_BE_UPDATED: - msg = (_('Updating a %(binary)s service is not supported. Only ' - 'karbor-operationengine services can be updated.') % - {'binary': service.binary}) - raise exc.HTTPBadRequest(explanation=msg) - - if 'status' in body: - if body['status'] == 'enabled': - if body.get('disabled_reason'): - msg = _("Specifying 'disabled_reason' with status " - "'enabled' is invalid.") - raise exc.HTTPBadRequest(explanation=msg) - service.disabled = False - service.disabled_reason = None - elif body['status'] == 'disabled': - service.disabled = True - service.disabled_reason = body.get('disabled_reason') - service.save() - return self._view_builder.detail(req, service) - - -def create_resource(): - return wsgi.Resource(ServiceController()) diff --git a/karbor/api/v1/triggers.py b/karbor/api/v1/triggers.py deleted file mode 100644 index 2ea6eb96..00000000 --- a/karbor/api/v1/triggers.py +++ /dev/null @@ -1,265 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
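The ServiceController above only permits status toggling for karbor-operationengine binaries, and rejects a disabled_reason when re-enabling. A request sketch against those rules, assuming the standard PUT route that mapper.resource("os-service", "os-services") registers; the endpoint, token, and service id are placeholders, not values from this tree:

import requests

BASE = "http://controller:8799/v1/demo_project_id"  # placeholder endpoint
HEADERS = {"X-Auth-Token": "example-token",
           "Content-Type": "application/json"}

# PUT /{project_id}/os-services/{id}: disable with a reason...
requests.put(BASE + "/os-services/42",
             json={"status": "disabled", "disabled_reason": "maintenance"},
             headers=HEADERS)

# ...and re-enable; sending disabled_reason here would return HTTP 400.
requests.put(BASE + "/os-services/42",
             json={"status": "enabled"}, headers=HEADERS)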
- -"""The triggers api.""" - -from datetime import datetime -from oslo_log import log as logging -from oslo_utils import uuidutils -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor.api.schemas import triggers as trigger_schema -from karbor.api import validation -from karbor.common import notification -from karbor.common.notification import StartNotification -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.policies import triggers as trigger_policy -from karbor.services.operationengine import api as operationengine_api -from karbor import utils - -LOG = logging.getLogger(__name__) - - -class TriggerViewBuilder(common.ViewBuilder): - """Model a trigger API response as a python dictionary.""" - - _collection_name = "triggers" - - def detail(self, request, trigger): - """Detailed view of a single trigger.""" - - trigger_ref = { - 'trigger_info': { - 'id': trigger.get('id'), - 'name': trigger.get('name'), - 'type': trigger.get('type'), - 'properties': trigger.get('properties'), - } - } - return trigger_ref - - def detail_list(self, request, triggers): - """Detailed view of a list of triggers.""" - return self._list_view(self.detail, request, triggers) - - def _list_view(self, func, request, triggers): - triggers_list = [func(request, item)['trigger_info'] - for item in triggers] - - triggers_links = self._get_collection_links(request, - triggers, - self._collection_name, - ) - ret = {'triggers': triggers_list} - if triggers_links: - ret['triggers_links'] = triggers_links - - return ret - - -class TriggersController(wsgi.Controller): - """The Triggers API controller for the OpenStack API.""" - - _view_builder_class = TriggerViewBuilder - - def __init__(self): - self.operationengine_api = operationengine_api.API() - super(TriggersController, self).__init__() - - @validation.schema(trigger_schema.create) - def create(self, req, body): - """Creates a new trigger.""" - - LOG.debug('Create trigger start') - - LOG.debug('Create a trigger, request body: %s', body) - - context = req.environ['karbor.context'] - context.can(trigger_policy.CREATE_POLICY) - trigger_info = body['trigger_info'] - context.notification = notification.KarborTriggerCreate( - context, request=req) - - trigger_name = trigger_info.get("name", None) - trigger_type = trigger_info.get("type", None) - trigger_property = trigger_info.get("properties", None) - - trigger_property.setdefault( - 'start_time', datetime.utcnow().replace(microsecond=0)) - trigger_definition = { - 'id': uuidutils.generate_uuid(), - 'name': trigger_name, - 'project_id': context.project_id, - 'type': trigger_type, - 'properties': trigger_property, - } - try: - with StartNotification( - context, name=trigger_name): - trigger = objects.Trigger( - context=context, **trigger_definition) - self.operationengine_api.verify_trigger(context, trigger) - self.operationengine_api.create_trigger(context, trigger) - trigger.create() - except exception.Invalid as ex: - raise exc.HTTPBadRequest(explanation=ex.msg) - except Exception as ex: - self._raise_unknown_exception(ex) - - return self._view_builder.detail(req, trigger) - - def delete(self, req, id): - """Delete a trigger.""" - - LOG.debug('Delete trigger(%s) start', id) - - context = req.environ['karbor.context'] - trigger = self._get_trigger_by_id(context, id) - context.notification = notification.KarborTriggerDelete( - context, request=req) - - context.can(trigger_policy.DELETE_POLICY, trigger) - - try: - operations 
= objects.ScheduledOperationList.get_by_filters( - context, {"trigger_id": id}, limit=1) - except Exception as ex: - self._raise_unknown_exception(ex) - - if operations: - msg = _("Trigger is being used by one or more operations") - raise exc.HTTPFailedDependency(explanation=msg) - - try: - with StartNotification(context, id=id): - self.operationengine_api.delete_trigger(context, id) - except exception.TriggerNotFound: - pass - except (exception.DeleteTriggerNotAllowed, - Exception) as ex: - self._raise_unknown_exception(ex) - - trigger.destroy() - - @validation.schema(trigger_schema.update) - def update(self, req, id, body): - """Update a trigger""" - - LOG.debug('Update trigger(%s) start', id) - - context = req.environ['karbor.context'] - trigger = self._get_trigger_by_id(context, id) - context.notification = notification.KarborTriggerUpdate( - context, request=req) - - context.can(trigger_policy.UPDATE_POLICY, trigger) - - trigger_info = body['trigger_info'] - trigger_name = trigger_info.get("name", None) - trigger_property = trigger_info.get("properties", None) - - if trigger_name: - self.validate_name_and_description(trigger_info) - trigger.name = trigger_name - - if trigger_property: - start_time = trigger_property.get('start_time', None) - if not start_time: - msg = (_("start_time should be supplied")) - raise exc.HTTPBadRequest(explanation=msg) - try: - trigger.properties = trigger_property - self.operationengine_api.verify_trigger(context, trigger) - self.operationengine_api.update_trigger(context, trigger) - except exception.InvalidInput as ex: - raise exc.HTTPBadRequest(explanation=ex.msg) - except (exception.TriggerNotFound, Exception) as ex: - self._raise_unknown_exception(ex) - try: - with StartNotification(context, id=id): - trigger.save() - except Exception as ex: - self._raise_unknown_exception(ex) - - return self._view_builder.detail(req, trigger) - - def show(self, req, id): - """Return data about the given trigger.""" - - LOG.debug('Get trigger(%s) start', id) - - context = req.environ['karbor.context'] - trigger = self._get_trigger_by_id(context, id) - - context.can(trigger_policy.GET_POLICY, trigger) - return self._view_builder.detail(req, trigger) - - def index(self, req): - """Returns a list of triggers, transformed through view builder.""" - - context = req.environ['karbor.context'] - context.can(trigger_policy.GET_ALL_POLICY) - - params = req.params.copy() - LOG.debug('List triggers start, params=%s', params) - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = params - - valid_filters = ["all_tenants", "name", "type", "properties"] - utils.remove_invalid_filter_options(context, filters, valid_filters) - utils.check_filters(filters) - - all_tenants = utils.get_bool_param("all_tenants", filters) - if not (context.is_admin and all_tenants): - filters["project_id"] = context.project_id - - try: - triggers = objects.TriggerList.get_by_filters( - context, filters, limit, marker, sort_keys, sort_dirs) - except Exception as ex: - self._raise_unknown_exception(ex) - - return self._view_builder.detail_list(req, triggers) - - def _get_trigger_by_id(self, context, id): - if not uuidutils.is_uuid_like(id): - msg = _("Invalid trigger id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - try: - trigger = objects.Trigger.get_by_id(context, id) - except exception.TriggerNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - except Exception as ex: - 
self._raise_unknown_exception(ex) - - return trigger - - def _raise_unknown_exception(self, exception_instance): - LOG.exception('An unknown exception happened') - - value = exception_instance.msg if isinstance( - exception_instance, exception.KarborException) else type( - exception_instance) - msg = (_('Unexpected API Error. Please report this at ' - 'http://bugs.launchpad.net/karbor/ and attach the ' - 'Karbor API log if possible.\n%s') % value) - raise exc.HTTPInternalServerError(explanation=msg) - - -def create_resource(): - return wsgi.Resource(TriggersController()) diff --git a/karbor/api/v1/verifications.py b/karbor/api/v1/verifications.py deleted file mode 100644 index deab8782..00000000 --- a/karbor/api/v1/verifications.py +++ /dev/null @@ -1,260 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The verification api.""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import uuidutils - -from webob import exc - -from karbor.api import common -from karbor.api.openstack import wsgi -from karbor.api.schemas import verifications as verification_schema -from karbor.api import validation -from karbor.common import constants -from karbor import exception -from karbor.i18n import _ - -from karbor import objects -from karbor.objects import base as objects_base -from karbor.policies import verifications as verification_policy -from karbor.services.protection import api as protection_api -from karbor import utils - -import six - -query_verification_filters_opt = cfg.ListOpt( - 'query_verification_filters', - default=['status'], - help="Verification filter options which " - "non-admin user could use to " - "query verifications. Default values " - "are: ['status']") -CONF = cfg.CONF -CONF.register_opt(query_verification_filters_opt) - -LOG = logging.getLogger(__name__) - - -class VerificationViewBuilder(common.ViewBuilder): - """Model a server API response as a python dictionary.""" - - _collection_name = "verifications" - - def detail(self, request, verification): - """Detailed view of a single verification.""" - verification_ref = { - 'verification': { - 'id': verification.get('id'), - 'project_id': verification.get('project_id'), - 'provider_id': verification.get('provider_id'), - 'checkpoint_id': verification.get('checkpoint_id'), - 'parameters': verification.get('parameters'), - 'status': verification.get('status'), - 'resources_status': verification.get('resources_status'), - 'resources_reason': verification.get('resources_reason'), - } - } - return verification_ref - - def detail_list(self, request, verifications, verification_count=None): - """Detailed view of a list of verifications.""" - return self._list_view(self.detail, request, verifications, - verification_count, - self._collection_name) - - def _list_view(self, func, request, verifications, verification_count, - coll_name=_collection_name): - """Provide a view for a list of verifications. 
- - :param func: Function used to format the verification data - :param request: API request - :param verifications: List of verifications in dictionary format - :param verification_count: Length of the original list of verifications - :param coll_name: Name of collection, used to generate the next link - for a pagination query - :returns: verification data in dictionary format - """ - verifications_list = [func(request, verification)['verification'] - for verification in verifications] - verifications_links = self._get_collection_links(request, - verifications, - coll_name, - verification_count) - verifications_dict = { - 'verifications': verifications_list - } - if verifications_links: - verifications_dict['verifications_links'] = verifications_links - - return verifications_dict - - -class VerificationsController(wsgi.Controller): - """The verifications API controller for the OpenStack API.""" - - _view_builder_class = VerificationViewBuilder - - def __init__(self): - self.protection_api = protection_api.API() - super(VerificationsController, self).__init__() - - def show(self, req, id): - """Return data about the given verification.""" - context = req.environ['karbor.context'] - - LOG.info("Show verification with id: %s", id, context=context) - - try: - verification = self._verification_get(context, id) - except exception.VerificationNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - LOG.info("Show verification request issued successfully.", - resource={'id': verification.id}) - return self._view_builder.detail(req, verification) - - def index(self, req): - """Returns a list of verifications.""" - context = req.environ['karbor.context'] - - LOG.info("Show verification list", context=context) - - params = req.params.copy() - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = params - - utils.remove_invalid_filter_options( - context, - filters, - CONF.query_verification_filters) - - utils.check_filters(filters) - verifications = self._get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - retval_verifications = self._view_builder.detail_list(req, - verifications) - - LOG.info("Show verification list request issued successfully.") - - return retval_verifications - - def _get_all(self, context, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, offset=None): - context.can(verification_policy.GET_ALL_POLICY) - - if filters is None: - filters = {} - - all_tenants = utils.get_bool_param('all_tenants', filters) - - if filters: - LOG.debug("Searching by: %s.", six.text_type(filters)) - - if context.is_admin and all_tenants: - # Need to remove all_tenants to pass the filtering below. 
- del filters['all_tenants'] - verifications = objects.VerificationList.get_all( - context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - else: - verifications = objects.VerificationList.get_all_by_project( - context, context.project_id, marker, limit, - sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, - offset=offset) - - LOG.info("Get all verifications completed successfully.") - return verifications - - @validation.schema(verification_schema.create) - def create(self, req, body): - """Creates a new verification.""" - - LOG.debug('Create verification request body: %s', body) - context = req.environ['karbor.context'] - context.can(verification_policy.CREATE_POLICY) - verification = body['verification'] - LOG.debug('Create verification request : %s', verification) - - parameters = verification.get("parameters") - - verification_properties = { - 'project_id': context.project_id, - 'provider_id': verification.get('provider_id'), - 'checkpoint_id': verification.get('checkpoint_id'), - 'parameters': parameters, - 'status': constants.VERIFICATION_STATUS_IN_PROGRESS, - } - - verification_obj = objects.Verification(context=context, - **verification_properties) - verification_obj.create() - - try: - self.protection_api.verification(context, verification_obj) - except Exception: - update_dict = { - "status": constants.VERIFICATION_STATUS_FAILURE - } - verification_obj = self._verification_update( - context, - verification_obj.get("id"), - update_dict) - - retval = self._view_builder.detail(req, verification_obj) - - return retval - - def _verification_get(self, context, verification_id): - if not uuidutils.is_uuid_like(verification_id): - msg = _("Invalid verification id provided.") - raise exc.HTTPBadRequest(explanation=msg) - - verification = objects.Verification.get_by_id(context, verification_id) - try: - context.can(verification_policy.GET_POLICY, verification) - except exception.PolicyNotAuthorized: - # raise VerificationNotFound instead to make sure karbor behaves - # as it used to - raise exception.VerificationNotFound( - verification_id=verification_id) - LOG.info("Verification info retrieved successfully.") - return verification - - def _verification_update(self, context, verification_id, fields): - try: - verification = self._verification_get(context, verification_id) - except exception.VerificationNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - if isinstance(verification, objects_base.KarborObject): - verification.update(fields) - verification.save() - LOG.info("The verification updated successfully.") - return verification - else: - msg = _("The parameter verification must be a object of " - "KarborObject class.") - raise exception.InvalidInput(reason=msg) - - -def create_resource(): - return wsgi.Resource(VerificationsController()) diff --git a/karbor/api/validation/__init__.py b/karbor/api/validation/__init__.py deleted file mode 100644 index 223d6185..00000000 --- a/karbor/api/validation/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Request Body validating middleware. - -""" - -import functools - -from karbor.api.validation import validators - - -def schema(request_body_schema): - """Register a schema to validate request body. - - The registered schema will be used for validating the request body just - before the API method executes. - - :param dict request_body_schema: a schema to validate request body - - """ - - def add_validator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - schema_validator = validators._SchemaValidator( - request_body_schema) - schema_validator.validate(kwargs['body']) - - return func(*args, **kwargs) - return wrapper - - return add_validator diff --git a/karbor/api/validation/parameter_types.py b/karbor/api/validation/parameter_types.py deleted file mode 100644 index 82bd9f8e..00000000 --- a/karbor/api/validation/parameter_types.py +++ /dev/null @@ -1,165 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Common parameter types for validating request Body. - -""" - -import re -import unicodedata - -import six - - -def _is_printable(char): - """Determine if a unicode code point is printable. - - This checks if the character is either "other" (mostly control - codes), or a non-horizontal space. All characters that don't match - those criteria are considered printable; that is: letters; - combining marks; numbers; punctuation; symbols; (horizontal) space - separators. - """ - category = unicodedata.category(char) - return (not category.startswith("C") and - (not category.startswith("Z") or category == "Zs")) - - -def _get_all_chars(): - for i in range(0xFFFF): - yield six.unichr(i) - - -# build a regex that matches all printable characters. This allows -# spaces in the middle of the name. Also note that the regexp below -# deliberately allows the empty string. This is so only the constraint -# which enforces a minimum length for the name is triggered when an -# empty string is tested. Otherwise it is not deterministic which -# constraint fails and this causes issues for some unittests when -# PYTHONHASHSEED is set randomly. - -def _build_regex_range(ws=True, invert=False, exclude=None): - """Build a range regex for a set of characters in utf8. - - This builds a valid range regex for characters in utf8 by - iterating the entire space and building up a set of x-y ranges for - all the characters we find which are valid. - - :param ws: should we include whitespace in this range. - :param exclude: any characters we want to exclude - :param invert: invert the logic - - The inversion is useful when we want to generate a set of ranges - which is everything that's not a certain class. For instance, - produce all the non-printable characters as a set of ranges.
- """ - if exclude is None: - exclude = [] - regex = "" - # are we currently in a range - in_range = False - # last character we found, for closing ranges - last = None - # last character we added to the regex, this lets us know that we - # already have B in the range, which means we don't need to close - # it out with B-B. While the later seems to work, it's kind of bad form. - last_added = None - - def valid_char(char): - if char in exclude: - result = False - elif ws: - result = _is_printable(char) - else: - # Zs is the unicode class for space characters, of which - # there are about 10 in this range. - result = (_is_printable(char) and - unicodedata.category(char) != "Zs") - if invert is True: - return not result - return result - - # iterate through the entire character range. in_ - for c in _get_all_chars(): - if valid_char(c): - if not in_range: - regex += re.escape(c) - last_added = c - in_range = True - else: - if in_range and last != last_added: - regex += "-" + re.escape(last) - in_range = False - last = c - else: - if in_range: - regex += "-" + re.escape(c) - return regex - - -valid_description_regex_base = '^[%s]*$' - -valid_description_regex = valid_description_regex_base % ( - _build_regex_range()) - - -name = { - 'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255, - 'format': 'name' -} - - -description = { - 'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255, - 'pattern': valid_description_regex, -} - - -boolean = { - 'type': ['boolean', 'string'], - 'enum': [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on', - 'YES', 'Yes', 'yes', 'y', 't', - False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', 'off', - 'NO', 'No', 'no', 'n', 'f'], -} - - -uuid = { - 'type': 'string', 'format': 'uuid' -} - - -metadata = { - 'type': ['object', 'null'], - 'patternProperties': { - '^[a-zA-Z0-9-_:.# ]{1,255}$': { - 'type': ['boolean', 'string', 'integer'] - } - }, - 'additionalProperties': False -} - -parameters = { - 'type': 'object', - 'patternProperties': { - '^[a-zA-Z0-9-_:.# ]{1,255}$': metadata - }, - 'additionalProperties': False -} - -resources = { - 'type': 'array', - 'items': { - 'type': 'object' - } -} diff --git a/karbor/api/validation/validators.py b/karbor/api/validation/validators.py deleted file mode 100644 index 18e9e4dc..00000000 --- a/karbor/api/validation/validators.py +++ /dev/null @@ -1,214 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Internal implementation of request Body validating middleware. - -""" - -import re - -import jsonschema -from jsonschema import exceptions as jsonschema_exc -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six - -from karbor import exception -from karbor.i18n import _ - - -def _soft_validate_additional_properties( - validator, additional_properties_value, param_value, schema): - """Validator function. - - If there are not any properties on the param_value that are not specified - in the schema, this will return without any effect. 
If there are any such - extra properties, they will be handled as follows: - - - if the validator passed to the method is not of type "object", this - method will return without any effect. - - if the 'additional_properties_value' parameter is True, this method will - return without any effect. - - if the schema has an additionalProperties value of True, the extra - properties on the param_value will not be touched. - - if the schema has an additionalProperties value of False and there - aren't patternProperties specified, the extra properties will be stripped - from the param_value. - - if the schema has an additionalProperties value of False and there - are patternProperties specified, the extra properties will not be - touched, and a validation error is raised if the pattern doesn't match. - """ - if (not validator.is_type(param_value, "object") or - additional_properties_value): - return - - properties = schema.get("properties", {}) - patterns = "|".join(schema.get("patternProperties", {})) - extra_properties = set() - for prop in param_value: - if prop not in properties: - if patterns: - if not re.search(patterns, prop): - extra_properties.add(prop) - else: - extra_properties.add(prop) - - if not extra_properties: - return - - if patterns: - error = "Additional properties are not allowed (%s %s unexpected)" - if len(extra_properties) == 1: - verb = "was" - else: - verb = "were" - yield jsonschema_exc.ValidationError( - error % (", ".join(repr(extra) for extra in extra_properties), - verb)) - else: - for prop in extra_properties: - del param_value[prop] - - -@jsonschema.FormatChecker.cls_checks('date-time') -def _validate_datetime_format(param_value): - try: - timeutils.parse_isotime(param_value) - except ValueError: - return False - else: - return True - - -@jsonschema.FormatChecker.cls_checks('name', exception.InvalidName) -def _validate_name(param_value): - if not param_value: - msg = "The 'name' cannot be None." - raise exception.InvalidName(reason=msg) - elif len(param_value.strip()) == 0: - msg = "The 'name' cannot be empty." - raise exception.InvalidName(reason=msg) - return True - - -@jsonschema.FormatChecker.cls_checks('uuid') -def _validate_uuid_format(instance): - return uuidutils.is_uuid_like(instance) - - -class FormatChecker(jsonschema.FormatChecker): - """A FormatChecker that can output the message from the cause exception. - - We need understandable validation error messages for users. When a - custom checker raises an exception, the FormatChecker will output a - readable message provided by the checker. - """ - - def check(self, param_value, format): - """Check whether the param_value conforms to the given format. - - :argument param_value: the param_value to check - :type: any primitive type (str, number, bool) - :argument str format: the format that param_value should conform to - :raises: :exc:`FormatError` if param_value does not conform to format - """ - - if format not in self.checkers: - return - - # For safety reasons custom checkers can be registered with - # allowed exception types. Anything else will fall into the - # default formatter. - func, raises = self.checkers[format] - result, cause = None, None - - try: - result = func(param_value) - except raises as e: - cause = e - if not result: - msg = "%r is not a %r" % (param_value, format) - raise jsonschema_exc.FormatError(msg, cause=cause) - - -class _SchemaValidator(object): - """A validator class. - - This class is changed from Draft4Validator to validate the minimum/maximum - value of a string number (e.g. '10').
These changes can be removed when - we tighten up the API definition and the XML conversion. - Also FormatCheckers are added for checking data formats which would be - passed through the karbor api commonly. - - """ - validator = None - validator_org = jsonschema.Draft4Validator - - def __init__(self, schema, relax_additional_properties=False): - validators = { - 'minimum': self._validate_minimum, - 'maximum': self._validate_maximum, - } - if relax_additional_properties: - validators[ - 'additionalProperties'] = _soft_validate_additional_properties - - validator_cls = jsonschema.validators.extend(self.validator_org, - validators) - format_checker = FormatChecker() - self.validator = validator_cls(schema, format_checker=format_checker) - - def validate(self, *args, **kwargs): - try: - self.validator.validate(*args, **kwargs) - except jsonschema.ValidationError as ex: - if isinstance(ex.cause, exception.InvalidName): - detail = ex.cause.msg - elif len(ex.path) > 0: - detail = _("Invalid input for field/attribute %(path)s." - " Value: %(value)s. %(message)s") % { - 'path': ex.path.pop(), 'value': ex.instance, - 'message': ex.message - } - else: - detail = ex.message - raise exception.ValidationError(detail=detail) - except TypeError as ex: - # NOTE: If a non-string value is passed to the patternProperties - # parameter, a TypeError happens; it is caught here. - detail = six.text_type(ex) - raise exception.ValidationError(detail=detail) - - def _number_from_str(self, param_value): - try: - value = int(param_value) - except (ValueError, TypeError): - try: - value = float(param_value) - except (ValueError, TypeError): - return None - return value - - def _validate_minimum(self, validator, minimum, param_value, schema): - param_value = self._number_from_str(param_value) - if param_value is None: - return - return self.validator_org.VALIDATORS['minimum'](validator, minimum, - param_value, schema) - - def _validate_maximum(self, validator, maximum, param_value, schema): - param_value = self._number_from_str(param_value) - if param_value is None: - return - return self.validator_org.VALIDATORS['maximum'](validator, maximum, - param_value, schema) diff --git a/karbor/api/versions.py b/karbor/api/versions.py deleted file mode 100644 index 478535eb..00000000 --- a/karbor/api/versions.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
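To make the string-number handling of _SchemaValidator above concrete, here is a minimal, self-contained sketch of the same jsonschema technique (illustrative names only, not karbor's actual wiring): the stock Draft4 'minimum'/'maximum' checks skip non-numeric instances, so the override coerces numeric strings first and then delegates to the stock check.

    import jsonschema

    def _number_from_str(value):
        # Accept strings like '10' or '1.5'; return None for anything else.
        try:
            return int(value)
        except (ValueError, TypeError):
            try:
                return float(value)
            except (ValueError, TypeError):
                return None

    def _validate_minimum(validator, minimum, instance, schema):
        coerced = _number_from_str(instance)
        if coerced is None:
            return
        # Delegate to the stock Draft4 'minimum' check with the coerced value.
        return jsonschema.Draft4Validator.VALIDATORS['minimum'](
            validator, minimum, coerced, schema)

    Validator = jsonschema.validators.extend(
        jsonschema.Draft4Validator, {'minimum': _validate_minimum})

    Validator({'minimum': 5}).validate('10')  # passes after coercion
    Validator({'minimum': 5}).validate('3')   # raises ValidationError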
- -from karbor.api.openstack import wsgi -from six.moves import http_client -import webob.dec - -from oslo_serialization import jsonutils - - -class Versions(object): - - @classmethod - def factory(cls, global_config, **local_config): - return cls() - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - """Respond to a request for all OpenStack API versions.""" - def build_version_object(version, path, status): - return { - 'id': 'v%s' % version, - 'status': status, - 'links': [ - { - 'rel': 'self', - 'href': '%s/data-protect/%s/' % (req.host_url, path), - }, - ], - } - - version_objs = [] - version_objs.extend([ - build_version_object(1.0, 'v1', 'CURRENT'), - ]) - - response = webob.Response(request=req, - status=http_client.MULTIPLE_CHOICES, - content_type='application/json') - response.body = jsonutils.dumps(dict(versions=version_objs)) - return response diff --git a/karbor/cmd/__init__.py b/karbor/cmd/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/cmd/api.py b/karbor/cmd/api.py deleted file mode 100644 index 13e5bccd..00000000 --- a/karbor/cmd/api.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Starter script for karbor OS API.""" - -import eventlet -eventlet.monkey_patch() - -import sys # noqa: E402 - -from oslo_config import cfg # noqa: E402 -from oslo_log import log as logging # noqa: E402 - -# Need to register global_opts -from karbor.common import config # noqa -from karbor import i18n # noqa: E402 -i18n.enable_lazy() -from karbor import objects # noqa: E402 -from karbor import rpc # noqa: E402 -from karbor import service # noqa: E402 -from karbor import version # noqa: E402 - - -CONF = cfg.CONF - - -def main(): - objects.register_all() - CONF(sys.argv[1:], project='karbor', - version=version.version_string()) - logging.setup(CONF, "karbor") - - rpc.init(CONF) - launcher = service.get_launcher() - server = service.WSGIService('osapi_karbor') - launcher.launch_service(server, workers=server.workers) - launcher.wait() diff --git a/karbor/cmd/manage.py b/karbor/cmd/manage.py deleted file mode 100644 index 50d2c8ca..00000000 --- a/karbor/cmd/manage.py +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env python -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" - CLI interface for karbor management. 
-""" - -import os -import sys - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_db.sqlalchemy import migration -from oslo_log import log as logging - -from karbor import i18n -i18n.enable_lazy() - -# Need to register global_opts -from karbor.common import config # noqa -from karbor import context # noqa: E402 -from karbor import db # noqa: E402 -from karbor.db import migration as db_migration # noqa: E402 -from karbor.db.sqlalchemy import api as db_api # noqa: E402 -from karbor.i18n import _ # noqa: E402 -from karbor import objects # noqa: E402 -from karbor import utils # noqa: E402 -from karbor import version # noqa: E402 - - -CONF = cfg.CONF - - -# Decorators for actions -def args(*args, **kwargs): - def _decorator(func): - func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) - return func - return _decorator - - -class DbCommands(object): - """Class for managing the database.""" - - @args('version', nargs='?', default=None, type=int, - help='Database version') - def sync(self, version=None): - """Sync the database up to the most recent version.""" - if version is not None and version > db.MAX_INT: - print(_('Version should be less than or equal to ' - '%(max_version)d.') % {'max_version': db.MAX_INT}) - sys.exit(1) - try: - return db_migration.db_sync(version) - except db_exc.DBMigrationError as ex: - print("Error during database migration: %s" % ex) - sys.exit(1) - - def version(self): - """Print the current database version.""" - print(db_migration.MIGRATE_REPO_PATH) - print(migration.db_version(db_api.get_engine(), - db_migration.MIGRATE_REPO_PATH, - db_migration.INIT_VERSION)) - - @args('age_in_days', type=int, - help='Purge deleted rows older than age in days') - def purge(self, age_in_days): - """Purge deleted rows older than a given age from karbor tables.""" - age_in_days = int(age_in_days) - if age_in_days <= 0: - print(_("Must supply a positive, non-zero value for age")) - sys.exit(1) - ctxt = context.get_admin_context() - - try: - db.purge_deleted_rows(ctxt, age_in_days) - except Exception as e: - print(_("Purge command failed, check karbor-manage " - "logs for more details. %s") % e) - sys.exit(1) - - -class VersionCommands(object): - """Class for exposing the codebase version.""" - - def list(self): - print(version.version_string()) - - def __call__(self): - self.list() - - -class ConfigCommands(object): - """Class for exposing the flags defined by flag_file(s).""" - - @args('param', nargs='?', default=None, - help='Configuration parameter to display (default: %(default)s)') - def list(self, param=None): - """List parameters configured for karbor. - - Lists all parameters configured for karbor unless an optional argument - is specified. If the parameter is specified we only print the - requested parameter. If the parameter is not found an appropriate - error is produced by .get*(). 
- """ - param = param and param.strip() - if param: - print('%s = %s' % (param, CONF.get(param))) - else: - for key, value in CONF.items(): - print('%s = %s' % (key, value)) - - -class ServiceCommands(object): - """Methods for managing services.""" - def list(self): - """Show a list of all karbor services.""" - - ctxt = context.get_admin_context() - services = db.service_get_all(ctxt) - print_format = "%-16s %-36s %-10s %-5s %-10s" - print(print_format % (_('Binary'), - _('Host'), - _('Status'), - _('State'), - _('Updated At'))) - for svc in services: - alive = utils.service_is_up(svc) - art = ":-)" if alive else "XXX" - status = 'enabled' - if svc['disabled']: - status = 'disabled' - print(print_format % (svc['binary'], svc['host'].partition('.')[0], - status, art, - svc['updated_at'])) - - -CATEGORIES = { - 'config': ConfigCommands, - 'db': DbCommands, - 'service': ServiceCommands, - 'version': VersionCommands, -} - - -def methods_of(obj): - """Return non-private methods from an object. - - Get all callable methods of an object that don't start with underscore - :return: a list of tuples of the form (method_name, method) - """ - result = [] - for i in dir(obj): - if callable(getattr(obj, i)) and not i.startswith('_'): - result.append((i, getattr(obj, i))) - return result - - -def add_command_parsers(subparsers): - for category in CATEGORIES: - command_object = CATEGORIES[category]() - - parser = subparsers.add_parser(category) - parser.set_defaults(command_object=command_object) - - category_subparsers = parser.add_subparsers(dest='action') - - for (action, action_fn) in methods_of(command_object): - parser = category_subparsers.add_parser(action) - - action_kwargs = [] - for args, kwargs in getattr(action_fn, 'args', []): - parser.add_argument(*args, **kwargs) - - parser.set_defaults(action_fn=action_fn) - parser.set_defaults(action_kwargs=action_kwargs) - - -category_opt = cfg.SubCommandOpt('category', - title='Command categories', - handler=add_command_parsers) - - -def get_arg_string(args): - arg = None - if args[0] == '-': - # (Note)zhiteng: args starts with FLAGS.oparser.prefix_chars - # is optional args. Notice that cfg module takes care of - # actual ArgParser so prefix_chars is always '-'. 
- if args[1] == '-': - # This is a long optional arg - arg = args[2:] - else: - arg = args[1:] - else: - arg = args - - return arg - - -def fetch_func_args(func): - fn_args = [] - for args, kwargs in getattr(func, 'args', []): - arg = get_arg_string(args[0]) - fn_args.append(getattr(CONF.category, arg)) - - return fn_args - - -def main(): - """Parse options and call the appropriate class/method.""" - objects.register_all() - CONF.register_cli_opt(category_opt) - script_name = sys.argv[0] - if len(sys.argv) < 2: - print(_("\nOpenStack Karbor version: %(version)s\n") % - {'version': version.version_string()}) - print(script_name + " category action [<args>]") - print(_("Available categories:")) - for category in CATEGORIES: - print(_("\t%s") % category) - sys.exit(2) - - try: - CONF(sys.argv[1:], project='karbor', - version=version.version_string()) - logging.setup(CONF, "karbor") - except cfg.ConfigDirNotFoundError as details: - print(_("Invalid directory: %s") % details) - sys.exit(2) - except cfg.ConfigFilesNotFoundError: - cfgfile = CONF.config_file[-1] if CONF.config_file else None - if cfgfile and not os.access(cfgfile, os.R_OK): - st = os.stat(cfgfile) - print(_("Could not read %s. Please try running this " - "command again with root/Administrator privilege, " - "using sudo.") % cfgfile) - try: - os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv) - except Exception: - print(_('sudo failed, continuing as if nothing happened')) - - print(_('Please re-run karbor-manage as root.')) - sys.exit(2) - - fn = CONF.category.action_fn - fn_args = fetch_func_args(fn) - fn(*fn_args) diff --git a/karbor/cmd/operationengine.py b/karbor/cmd/operationengine.py deleted file mode 100644 index b0986a4c..00000000 --- a/karbor/cmd/operationengine.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Starter script for karbor OperationEngine.""" - -import eventlet -eventlet.monkey_patch() - -import sys # noqa: E402 - -from oslo_config import cfg # noqa: E402 -from oslo_log import log as logging # noqa: E402 - -from karbor import i18n # noqa: E402 -i18n.enable_lazy() -from karbor import objects # noqa: E402 - -# Need to register global_opts -from karbor.common import config # noqa -from karbor import service # noqa: E402 -from karbor import version # noqa: E402 - - -CONF = cfg.CONF - - -def main(): - objects.register_all() - CONF(sys.argv[1:], project='karbor', - version=version.version_string()) - logging.setup(CONF, "karbor") - server = service.Service.create(binary='karbor-operationengine') - service.serve(server) - service.wait() diff --git a/karbor/cmd/protection.py b/karbor/cmd/protection.py deleted file mode 100644 index 2082b76a..00000000 --- a/karbor/cmd/protection.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Starter script for karbor protection service.""" - -import eventlet -eventlet.monkey_patch() - -import sys # noqa: E402 - -from oslo_config import cfg # noqa: E402 -from oslo_log import log as logging # noqa: E402 - -from karbor import i18n # noqa: E402 -i18n.enable_lazy() -from karbor import objects # noqa: E402 - -# Need to register global_opts -from karbor.common import config # noqa -from karbor import service # noqa: E402 -from karbor import version # noqa: E402 - - -CONF = cfg.CONF - - -def main(): - objects.register_all() - CONF(sys.argv[1:], project='karbor', - version=version.version_string()) - logging.setup(CONF, "karbor") - server = service.Service.create(binary='karbor-protection') - service.serve(server) - service.wait() diff --git a/karbor/cmd/status.py b/karbor/cmd/status.py deleted file mode 100644 index c1c10b98..00000000 --- a/karbor/cmd/status.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) 2018 NEC, Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -from oslo_config import cfg -from oslo_upgradecheck import upgradecheck - -from karbor.i18n import _ - -CONF = cfg.CONF - - -class Checks(upgradecheck.UpgradeCommands): - - """Contains upgrade checks - - Various upgrade checks should be added as separate methods in this class - and added to _upgrade_checks tuple. - """ - - def _sample_check(self): - """This is sample check added to test the upgrade check framework - - It needs to be removed after adding any real upgrade check - """ - return upgradecheck.Result(upgradecheck.Code.SUCCESS, 'Sample detail') - - _upgrade_checks = ( - # Sample check added for now. - # Whereas in future real checks must be added here in tuple - (_('Sample Check'), _sample_check), - ) - - -def main(): - return upgradecheck.main( - CONF, project='karbor', upgrade_command=Checks()) - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/karbor/common/__init__.py b/karbor/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/common/config.py b/karbor/common/config.py deleted file mode 100644 index 54cd9b4f..00000000 --- a/karbor/common/config.py +++ /dev/null @@ -1,119 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Command-line flag library. - -Emulates gflags by wrapping cfg.ConfigOpts. - -The idea is to move fully to cfg eventually, and this wrapper is a -stepping stone. - -""" - -import socket - -from oslo_config import cfg -from oslo_log import log as logging - - -CONF = cfg.CONF -logging.register_options(CONF) - -core_opts = [ - cfg.StrOpt('state_path', - default='/var/lib/karbor', - deprecated_name='pybasedir', - help="Top-level directory for maintaining karbor's state"), -] - -debug_opts = [ -] - -CONF.register_cli_opts(core_opts) -CONF.register_cli_opts(debug_opts) - -global_opts = [ - cfg.IntOpt('service_down_time', - default=60, - help='Maximum time since last check-in for a service to be ' - 'considered up'), - cfg.StrOpt('operationengine_topic', - default='karbor-operationengine', - help='The topic that OperationEngine nodes listen on'), - cfg.StrOpt('operationengine_manager', - default='karbor.services.operationengine.manager.' - 'OperationEngineManager', - help='Full class name for the Manager for OperationEngine'), - cfg.StrOpt('protection_topic', - default='karbor-protection', - help='The topic that protection nodes listen on'), - cfg.StrOpt('protection_manager', - default='karbor.services.protection.manager.ProtectionManager', - help='Full class name for the Manager for Protection'), - cfg.HostAddressOpt('host', - default=socket.gethostname(), - help='Name of this node. This can be an opaque ' - 'identifier. It is not necessarily a host ' - 'name, FQDN, or IP address.'), - cfg.StrOpt('auth_strategy', - default='keystone', - choices=['noauth', 'keystone'], - help='The strategy to use for auth. Supports noauth or ' - 'keystone.'), -] - -CONF.register_opts(global_opts) - - -service_client_opts = [ - cfg.StrOpt('service_name', - help='The name of service registered in Keystone'), - - cfg.StrOpt('service_type', - help='The type of service registered in Keystone'), - - cfg.StrOpt('version', - help='The version of service client'), - - cfg.StrOpt('region_id', - default='RegionOne', - help='The region id which the service belongs to.'), - - cfg.StrOpt('interface', - default='internal', - help='The network interface of the endpoint. Valid ' - 'values are: public, admin, internal.'), - - cfg.StrOpt('ca_cert_file', - help='Location of the CA certificate file ' - 'to use for client requests in SSL connections.'), - - cfg.BoolOpt('auth_insecure', - default=False, - help='Bypass verification of server certificate when ' - 'making SSL connection to service.') -] - - -keystone_client_opts = [ - cfg.StrOpt('auth_uri', - default='', - help='Unversioned keystone url in format like ' - 'http://0.0.0.0:5000.')] - - -def list_opts(): - yield 'clients_keystone', keystone_client_opts - - -for group, opts in list_opts(): - CONF.register_opts(opts, group=group) diff --git a/karbor/common/constants.py b/karbor/common/constants.py deleted file mode 100644 index 9eb21856..00000000 --- a/karbor/common/constants.py +++ /dev/null @@ -1,123 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -# operation type -OPERATION_TYPES = ( - OPERATION_PROTECT, - OPERATION_RESTORE, - OPERATION_DELETE, - OPERATION_VERIFY, - OPERATION_COPY, -) = ( - 'protect', - 'restore', - 'delete', - 'verify', - 'copy' -) - - -# plugin type -PLUGIN_BANK = 'bank' - -# supported network resource types -NETWORK_RESOURCE_TYPES = (NET_RESOURCE_TYPE, - SUBNET_RESOURCE_TYPE, - ROUTER_RESOURCE_TYPE, - ROUTERINTERFACE_RESOURCE_TYPE, - PORT_RESOURCE_TYPE, - SECURITYGROUP_RESOURCE_TYPE, - ) = ('OS::Neutron::Net', - 'OS::Neutron::Subnet', - 'OS::Neutron::Router', - 'OS::Neutron::RouterInterface', - 'OS::Neutron::Port', - 'OS::Neutron::SecurityGroup', - ) - -# supported resource types -RESOURCE_TYPES = (PROJECT_RESOURCE_TYPE, - SERVER_RESOURCE_TYPE, - VOLUME_RESOURCE_TYPE, - IMAGE_RESOURCE_TYPE, - SHARE_RESOURCE_TYPE, - NETWORK_RESOURCE_TYPE, - DATABASE_RESOURCE_TYPE, - POD_RESOURCE_TYPE - ) = ('OS::Keystone::Project', - 'OS::Nova::Server', - 'OS::Cinder::Volume', - 'OS::Glance::Image', - 'OS::Manila::Share', - 'OS::Neutron::Network', - 'OS::Trove::Instance', - 'OS::Kubernetes::Pod', - ) -# plan status -PLAN_STATUS_SUSPENDED = 'suspended' -PLAN_STATUS_STARTED = 'started' - -CHECKPOINT_STATUS_ERROR = 'error' -CHECKPOINT_STATUS_PROTECTING = 'protecting' -CHECKPOINT_STATUS_WAIT_COPYING = 'wait_copying' -CHECKPOINT_STATUS_COPYING = 'copying' -CHECKPOINT_STATUS_COPY_FINISHED = 'finished' -CHECKPOINT_STATUS_AVAILABLE = 'available' -CHECKPOINT_STATUS_DELETING = 'deleting' -CHECKPOINT_STATUS_DELETED = 'deleted' -CHECKPOINT_STATUS_ERROR_DELETING = 'error-deleting' - -CHECKPOINT_CREATED_BY = (OPERATION_ENGINE, - MANUAL, - ) = ('operation-engine', - 'manual', - ) - -# resource status -RESOURCE_STATUS_ERROR = 'error' -RESOURCE_STATUS_PROTECTING = 'protecting' -RESOURCE_STATUS_STARTED = 'started' -RESOURCE_STATUS_AVAILABLE = 'available' -RESOURCE_STATUS_DELETING = 'deleting' -RESOURCE_STATUS_DELETED = 'deleted' -RESOURCE_STATUS_RESTORING = 'restoring' # use in restore object only -RESOURCE_STATUS_UNDEFINED = 'undefined' - -# scheduled operation state -OPERATION_STATE_INIT = 'init' -OPERATION_STATE_REGISTERED = 'registered' -OPERATION_STATE_RUNNING = 'running' -OPERATION_STATE_DELETED = 'deleted' - -# scheduled operation run type -OPERATION_RUN_TYPE_EXECUTE = 'execute' -OPERATION_RUN_TYPE_RESUME = 'resume' - -# scheduled operation execution state -OPERATION_EXE_STATE_IN_PROGRESS = 'in_progress' -OPERATION_EXE_STATE_SUCCESS = 'success' -OPERATION_EXE_STATE_FAILED = 'failed' -OPERATION_GET_MAX_BACKUP_STATE_FAILED = 'get_max_backup_policy_failed' -OPERATION_EXE_MAX_BACKUP_STATE_SUCCESS = 'excute_max_backup_policy_success' -OPERATION_EXE_MAX_BACKUP_STATE_FAILED = 'excute_max_backup_policy_failed' -OPERATION_GET_DURATION_STATE_FAILED = 'get_duration_policy_failed' -OPERATION_EXE_DURATION_STATE_SUCCESS = 'excute_duration_policy_success' -OPERATION_EXE_DURATION_STATE_FAILED = 'excute_duration_policy_failed' -OPERATION_EXE_STATE_DROPPED_OUT_OF_WINDOW = 'dropped_out_of_window' - -RESTORE_STATUS_SUCCESS = 'success' -RESTORE_STATUS_FAILURE = 'fail' -RESTORE_STATUS_IN_PROGRESS = 'in_progress' - -VERIFICATION_STATUS_SUCCESS = 'success' -VERIFICATION_STATUS_FAILURE = 'fail' -VERIFICATION_STATUS_IN_PROGRESS = 'in_progress' diff --git a/karbor/common/karbor_keystone_plugin.py b/karbor/common/karbor_keystone_plugin.py deleted file mode 100644 index c3540051..00000000 --- a/karbor/common/karbor_keystone_plugin.py +++ /dev/null @@ -1,183 +0,0 
@@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from keystoneauth1 import access -from keystoneauth1.identity import access as access_plugin -from keystoneauth1 import loading -from keystoneauth1 import session as keystone_session -from keystoneclient.v3 import client as kc_v3 -from oslo_config import cfg -from oslo_log import log as logging - -from karbor import exception -from karbor import utils - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -# the config of trustee is like: -# [trustee] -# auth_type = password -# auth_url = http://192.168.1.2:35357 -# username = karbor -# password = password -# user_domain_id = default -KEYSTONECLIENT_VERSION = (3, 0) -TRUSTEE_CONF_GROUP = 'trustee' -loading.register_auth_conf_options(CONF, TRUSTEE_CONF_GROUP) -CONF.import_group('keystone_authtoken', - 'keystonemiddleware.auth_token') - - -class KarborKeystonePlugin(object): - """Contruct a keystone client plugin with karbor user - - to offer the following functions: - - 1. get the endpoint of service, such as nova, cinder - 2. create trust to karbor - """ - - def __init__(self): - self._client = None - self._auth_uri = None - self._karbor_user_id = None - auth_plugin = self._get_karbor_auth_plugin() - self._service_auth_plugin = auth_plugin - - @property - def karbor_user_id(self): - if not self._karbor_user_id: - lcfg = CONF[TRUSTEE_CONF_GROUP] - self._karbor_user_id = self._get_service_user( - lcfg.username, lcfg.user_domain_id) - return self._karbor_user_id - - @property - def client(self): - if not self._client: - self._client = self._get_keystone_client(self.service_auth_plugin) - return self._client - - @property - def auth_uri(self): - if not self._auth_uri: - try: - self._auth_uri = utils.get_auth_uri() - except Exception: - msg = 'get keystone auth url failed' - raise exception.AuthorizationFailure(obj=msg) - return self._auth_uri - - @property - def service_auth_plugin(self): - return self._service_auth_plugin - - def get_service_endpoint(self, service_name, service_type, - region_id, interface='public'): - if self._auth_uri and self._auth_uri.endswith('/'): - base_url = self._auth_uri[:-1] - else: - base_url = self._auth_uri - try: - service = self.client.services.list( - name=service_name, - service_type=service_type, - base_url=base_url) - - endpoint = self.client.endpoints.list( - service=service[0], - interface=interface, - region_id=region_id, - base_url=base_url) - - return endpoint[0].url if endpoint else None - - except Exception: - msg = ('get service(%s) endpoint failed' % service_name) - raise exception.AuthorizationFailure(obj=msg) - - def create_user_auth_plugin(self, context): - if not context.auth_token_info: - msg = ("user=%s, project=%s" % (context.user_id, - context.project_id)) - raise exception.AuthorizationFailure(obj=msg) - - auth_ref = access.create(body=context.auth_token_info, - auth_token=context.auth_token) - return access_plugin.AccessInfoPlugin( - auth_url=self.auth_uri, auth_ref=auth_ref) - - def create_trust_to_karbor(self, context): - l_kc_v3 = 
self._get_keystone_client( - self.create_user_auth_plugin(context)) - keystone_trust_url = self.auth_uri + 'OS-TRUST' - try: - trust = l_kc_v3.trusts.create(trustor_user=context.user_id, - trustee_user=self.karbor_user_id, - project=context.project_id, - impersonation=True, - role_names=context.roles, - base_url=keystone_trust_url) - return trust.id - - except Exception as e: - raise exception.AuthorizationFailure(obj=str(e)) - - def delete_trust_to_karbor(self, trust_id): - auth_plugin = self._get_karbor_auth_plugin(trust_id) - client = self._get_keystone_client(auth_plugin) - client.trusts.delete(trust_id) - - def create_trust_session(self, trust_id): - auth_plugin = self._get_karbor_auth_plugin(trust_id) - cafile = cfg.CONF.keystone_authtoken.cafile - return keystone_session.Session( - auth=auth_plugin, verify=False if - CONF.keystone_authtoken.insecure else cafile) - - def _get_service_user(self, user_name, user_domain_id): - try: - users = self.client.users.list( - name=user_name, - domain=user_domain_id) - - return users[0].id if users else None - - except Exception as e: - raise exception.AuthorizationFailure(obj=e) - - def _get_karbor_auth_plugin(self, trust_id=None): - auth_plugin = loading.load_auth_from_conf_options( - CONF, TRUSTEE_CONF_GROUP, trust_id=trust_id) - - if not auth_plugin: - LOG.warning('Please add the trustee credentials you need to the' - ' %s section of your karbor.conf file.', - TRUSTEE_CONF_GROUP) - raise exception.AuthorizationFailure(obj=TRUSTEE_CONF_GROUP) - - return auth_plugin - - def _get_keystone_client(self, auth_plugin): - cafile = cfg.CONF.keystone_authtoken.cafile - try: - l_session = keystone_session.Session( - auth=auth_plugin, verify=False if - CONF.keystone_authtoken.insecure else cafile) - return kc_v3.Client(version=KEYSTONECLIENT_VERSION, - session=l_session) - except Exception: - msg = ('create keystone client failed.cafile:(%s)' % cafile) - raise exception.AuthorizationFailure(obj=msg) diff --git a/karbor/common/notification.py b/karbor/common/notification.py deleted file mode 100644 index d9f44b41..00000000 --- a/karbor/common/notification.py +++ /dev/null @@ -1,349 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
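The trust machinery in KarborKeystonePlugin above is easiest to read end to end. Here is a hedged sketch of the intended lifecycle, using only methods defined in that class; `context` stands for an authenticated request context, and the scheduling code that actually drives these calls is not part of this diff:

    from karbor.common.karbor_keystone_plugin import KarborKeystonePlugin

    plugin = KarborKeystonePlugin()

    # 1. While the user is present, delegate their roles to the karbor
    #    service user through a Keystone trust.
    trust_id = plugin.create_trust_to_karbor(context)

    # 2. Later (e.g. when a scheduled operation fires), authenticate as
    #    the trustee via the stored trust id.
    session = plugin.create_trust_session(trust_id)

    # 3. Revoke the delegation once it is no longer needed.
    plugin.delete_trust_to_karbor(trust_id)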
- -"""The notification module.""" - -import abc -import copy -import traceback - -from karbor import exception -from karbor.i18n import _ -from karbor import rpc -from oslo_config import cfg -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class EndNotification(object): - - @property - def _notifier(self): - """Returns the notification for Karbor API.""" - - return (self.context.notification) - - def __init__(self, context, **kwargs): - self.context = context - self.context.notification.payload.update(kwargs) - - def __enter__(self): - return self.context.notification - - def __exit__(self, etype, value, tb): - if etype: - message = str(value) - exception = traceback.format_exception(etype, value, tb) - self._notifier.notify_exc_info(message, exception) - else: - self._notifier.notify_end() - - -class StartNotification(EndNotification): - - def __enter__(self): - self.context.notification.notify_start() - return super(StartNotification, self).__enter__() - - -class KaborAPINotification(object): - - """The traits of karbor.* notifications.""" - - event_type_format = 'karbor.%s.%s' - notify_callback = None - - @classmethod - def register_notify_callback(cls, callback): - """Callback when a notification is sent out.""" - cls.notify_callback = callback - - @abc.abstractmethod - def event_type(self): - 'Returns the event type (like "create" for karbor.create.start)' - pass - - @abc.abstractmethod - def required_start_traits(self): - 'Returns list of required traits for start notification' - pass - - def optional_start_traits(self): - 'Returns list of optional traits for start notification' - return [] - - def required_end_traits(self): - 'Returns list of required traits for end notification' - return [] - - def optional_end_traits(self): - 'Returns list of optional traits for end notification' - return [] - - def required_error_traits(self): - 'Returns list of required traits for error notification' - return ['message', 'exception'] - - def optional_error_traits(self): - 'Returns list of optional traits for error notification' - return ['id'] - - def required_base_traits(self): - return ['tenant_id', 'client_ip', 'request_id'] - - @property - def request_id(self): - return self.payload['request_id'] - - def __init__(self, context, **kwargs): - self.context = context - self.needs_end_notification = True - - self.payload = {} - - if 'request' in kwargs: - request = kwargs.pop('request') - self.payload.update({ - 'request_id': context.request_id, - 'client_ip': request.remote_addr, - 'tenant_id': context.tenant, - }) - elif 'request_id' not in kwargs: - raise exception.InvalidInput( - reason="Notification must include 'request' property") - - self.payload.update(kwargs) - - def serialize(self, context): - return self.payload - - def validate(self, required_traits): - required_keys = set(required_traits) - provided_keys = set(self.payload.keys()) - if not required_keys.issubset(provided_keys): - msg = (_("The following required keys not defined for" - " notification %(name)s: %(keys)s") - % {'name': self.__class__.__name__, - 'keys': list(required_keys - provided_keys)}) - raise exception.InvalidInput(reason=msg) - - def _notify(self, event_qualifier, required_traits, optional_traits, - **kwargs): - self.payload.update(kwargs) - self.validate(self.required_base_traits() + required_traits) - available_values = self.serialize(self.context) - payload = {k: available_values[k] - for k in self.required_base_traits() + required_traits} - for k in 
optional_traits: - if k in available_values: - payload[k] = available_values[k] - - qualified_event_type = (KaborAPINotification.event_type_format - % (self.event_type(), event_qualifier)) - LOG.debug('Sending event: %(event_type)s, %(payload)s', - {'event_type': qualified_event_type, 'payload': payload}) - - context = copy.copy(self.context) - del context.notification - notifier = rpc.get_notifier() - notifier.info(context, qualified_event_type, self.payload) - if self.notify_callback: - self.notify_callback(event_qualifier) - - def notify_start(self, **kwargs): - self._notify('start', self.required_start_traits(), - self.optional_start_traits(), **kwargs) - - def notify_end(self, **kwargs): - if self.needs_end_notification: - self._notify('end', self.required_end_traits(), - self.optional_end_traits(), **kwargs) - - def notify_exc_info(self, message, exception): - self.payload.update({ - 'message': message, - 'exception': exception - }) - self._notify('error', self.required_error_traits(), - self.optional_error_traits()) - - -class KarborPlanCreate(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'plan_create' - - @abc.abstractmethod - def required_start_traits(self): - return ['name'] - - def optional_start_traits(self): - return ['parameters'] - - def required_end_traits(self): - return ['name'] - - -class KarborPlanDelete(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'plan_delete' - - @abc.abstractmethod - def required_start_traits(self): - return ['id'] - - -class KarborPlanUpdate(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'plan_update' - - @abc.abstractmethod - def required_start_traits(self): - return ['id'] - - -class KarborTriggerDelete(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'trigger_delete' - - @abc.abstractmethod - def required_start_traits(self): - return ['id'] - - -class KarborTriggerCreate(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'trigger_create' - - @abc.abstractmethod - def required_start_traits(self): - return ['name'] - - def optional_start_traits(self): - return ['parameters'] - - def required_end_traits(self): - return ['name'] - - -class KarborTriggerUpdate(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'trigger_update' - - @abc.abstractmethod - def required_start_traits(self): - return ['id'] - - -class KarborRestoreDelete(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'restore_delete' - - @abc.abstractmethod - def required_start_traits(self): - return ['id'] - - -class KarborRestoreCreate(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'restore_create' - - @abc.abstractmethod - def required_start_traits(self): - return ['parameters'] - - def required_end_traits(self): - return ['parameters'] - - -class KarborCheckpointCreate(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'checkpoint_create' - - @abc.abstractmethod - def required_start_traits(self): - return ['checkpoint_properties'] - - def required_end_traits(self): - return ['checkpoint_properties'] - - -class KarborCheckpointDelete(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'checkpoint_delete' - - @abc.abstractmethod - def required_start_traits(self): - return ['checkpoint_id'] - - def required_end_traits(self): - return ['checkpoint_id'] - - -class 
KarborCheckpointUpdate(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'checkpoint_update' - - @abc.abstractmethod - def required_start_traits(self): - return ['checkpoint_id'] - - -class KarborScheduledOpsCreate(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'scheduled_operation_create' - - @abc.abstractmethod - def required_start_traits(self): - return ['operation_obj'] - - def required_end_traits(self): - return ['operation_obj'] - - -class KarborScheduledOpsDelete(KaborAPINotification): - - @abc.abstractmethod - def event_type(self): - return 'scheduled_operation_delete' - - @abc.abstractmethod - def required_start_traits(self): - return ['id'] - - def required_end_traits(self): - return ['id'] diff --git a/karbor/common/opts.py b/karbor/common/opts.py deleted file mode 100644 index 141e3d4b..00000000 --- a/karbor/common/opts.py +++ /dev/null @@ -1,85 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import itertools - -import karbor.api.common -import karbor.api.v1.protectables -import karbor.api.v1.providers -import karbor.common.config -import karbor.db.api -import karbor.exception -import karbor.service -import karbor.services.operationengine.engine.executors.green_thread_executor as green_thread_executor # noqa -import karbor.services.operationengine.engine.executors.thread_pool_executor as thread_pool_executor # noqa -import karbor.services.operationengine.engine.triggers.timetrigger as time_trigger # noqa -import karbor.services.operationengine.karbor_client -import karbor.services.operationengine.manager -import karbor.services.operationengine.operations.base as base -import karbor.services.protection.clients.cinder -import karbor.services.protection.clients.glance -import karbor.services.protection.clients.manila -import karbor.services.protection.clients.neutron -import karbor.services.protection.clients.nova -import karbor.services.protection.flows.restore -import karbor.services.protection.flows.worker -import karbor.services.protection.manager -import karbor.wsgi.eventlet_server - -__all__ = ['list_opts'] - -_opts = [ - ('clients_keystone', list(itertools.chain( - karbor.common.config.keystone_client_opts))), - ('operationengine', list(itertools.chain( - green_thread_executor.green_thread_executor_opts, - karbor.services.operationengine.manager.trigger_manager_opts))), - ('karbor_client', list(itertools.chain( - karbor.common.config.service_client_opts))), - ('cinder_client', list(itertools.chain( - karbor.common.config.service_client_opts, - karbor.services.protection.clients.cinder.cinder_client_opts))), - ('glance_client', list(itertools.chain( - karbor.common.config.service_client_opts, - karbor.services.protection.clients.glance.glance_client_opts))), - ('manila_client', list(itertools.chain( - karbor.common.config.service_client_opts, - karbor.services.protection.clients.manila.manila_client_opts))), - ('neutron_client', list(itertools.chain( - karbor.common.config.service_client_opts, - 
karbor.services.protection.clients.neutron.neutron_client_opts))), - ('nova_client', list(itertools.chain( - karbor.common.config.service_client_opts, - karbor.services.protection.clients.nova.nova_client_opts))), - ('DEFAULT', list(itertools.chain( - karbor.common.config.core_opts, - karbor.common.config.debug_opts, - karbor.common.config.global_opts, - karbor.api.common.api_common_opts, - karbor.api.v1.protectables.query_instance_filters_opts, - karbor.api.v1.providers.query_provider_filters_opts, - karbor.api.v1.providers.query_checkpoint_filters_opts, - karbor.db.api.db_opts, - thread_pool_executor.executor_opts, - time_trigger.time_trigger_opts, - base.record_operation_log_executor_opts, - karbor.services.protection.flows.restore.sync_status_opts, - karbor.services.protection.flows.worker.workflow_opts, - karbor.services.protection.manager.protection_manager_opts, - karbor.wsgi.eventlet_server.socket_opts, - karbor.exception.exc_log_opts, - karbor.service.service_opts)))] - - -def list_opts(): - return [(g, copy.deepcopy(o)) for g, o in _opts] diff --git a/karbor/context.py b/karbor/context.py deleted file mode 100644 index e242f7a9..00000000 --- a/karbor/context.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""RequestContext: context for requests that persist through all of karbor.""" - -import copy - -from oslo_config import cfg -from oslo_context import context -from oslo_utils import timeutils -import six - -from karbor import exception -from karbor.i18n import _ -from karbor.objects import base as objects_base -from karbor import policy - -CONF = cfg.CONF - - -class RequestContext(context.RequestContext): - """Security context and request information. - - Represents the user taking a given action within the system. - - """ - def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", - roles=None, project_name=None, remote_address=None, - timestamp=None, request_id=None, auth_token=None, - overwrite=True, quota_class=None, service_catalog=None, - domain=None, user_domain=None, project_domain=None, - auth_token_info=None): - """Initialize RequestContext. - - :param read_deleted: 'no' indicates deleted records are hidden, 'yes' - indicates deleted records are visible, 'only' indicates that - *only* deleted records are visible. - - :param overwrite: Set to False to ensure that the greenthread local - copy of the index is not overwritten. 
- """ - - super(RequestContext, self).__init__(auth_token=auth_token, - user=user_id, - tenant=project_id, - domain=domain, - user_domain=user_domain, - project_domain=project_domain, - is_admin=is_admin, - request_id=request_id, - overwrite=overwrite) - self.roles = roles or [] - self.project_name = project_name - self.read_deleted = read_deleted - self.remote_address = remote_address - if not timestamp: - timestamp = timeutils.utcnow() - elif isinstance(timestamp, six.string_types): - timestamp = timeutils.parse_isotime(timestamp) - self.timestamp = timestamp - self.quota_class = quota_class - self._auth_token_info = auth_token_info - - if service_catalog: - # Only include required parts of service_catalog - self.service_catalog = [s for s in service_catalog - if s.get('type') in - ('identity', 'compute', 'object-store', - 'image', 'volume', 'volumev2', 'network', - 'volumev3', 'orchestration', - 'share', 'sharev2', 'database')] - else: - # if list is empty or none - self.service_catalog = [] - - # We need to have RequestContext attributes defined - # when policy.check_is_admin invokes request logging - # to make it loggable. - if self.is_admin is None: - self.is_admin = policy.check_is_admin(self) - elif self.is_admin and 'admin' not in self.roles: - self.roles.append('admin') - - def _get_read_deleted(self): - return self._read_deleted - - def _set_read_deleted(self, read_deleted): - if read_deleted not in ('no', 'yes', 'only'): - raise ValueError(_("read_deleted can only be one of 'no', " - "'yes' or 'only', not %r") % read_deleted) - self._read_deleted = read_deleted - - def _del_read_deleted(self): - del self._read_deleted - - read_deleted = property(_get_read_deleted, _set_read_deleted, - _del_read_deleted) - - def to_dict(self): - result = super(RequestContext, self).to_dict() - result['user_id'] = self.user_id - result['project_id'] = self.project_id - result['project_name'] = self.project_name - result['domain'] = self.domain - result['read_deleted'] = self.read_deleted - result['roles'] = self.roles - result['remote_address'] = self.remote_address - result['timestamp'] = self.timestamp.isoformat() - result['quota_class'] = self.quota_class - result['service_catalog'] = self.service_catalog - result['request_id'] = self.request_id - result['auth_token_info'] = self._auth_token_info - return result - - @classmethod - def from_dict(cls, values): - allowed_keys = [ - 'user_id', - 'project_id', - 'project_name', - 'domain', - 'read_deleted', - 'remote_address', - 'timestamp', - 'quota_class', - 'service_catalog', - 'request_id', - 'is_admin', - 'roles', - 'auth_token', - 'user_domain', - 'project_domain', - 'auth_token_info' - ] - kwargs = {k: values[k] for k in values if k in allowed_keys} - return cls(**kwargs) - - def can(self, action, target_obj=None, fatal=True): - """Verifies that the given action is valid on the target in this context. - - :param action: string representing the action to be checked. - :param target: dictionary representing the object of the action - for object creation this should be a dictionary representing the - location of the object e.g. ``{'project_id': context.project_id}``. - If None, then this default target will be considered: - {'project_id': self.project_id, 'user_id': self.user_id} - :param: target_obj: dictionary representing the object which will be - used to update target. - :param fatal: if False, will return False when an - exception.NotAuthorized occurs. - - :raises nova.exception.Forbidden: if verification fails and fatal is - True. 
- - :return: returns a non-False value (not necessarily "True") if - authorized and False if not authorized and fatal is False. - """ - target = {'project_id': self.project_id, - 'user_id': self.user_id} - if isinstance(target_obj, objects_base.KarborObject): - # Turn object into dict so target.update can work - target.update( - target_obj.obj_to_primitive()['karbor_object.data'] or {}) - else: - target.update(target_obj or {}) - - try: - return policy.authorize(self, action, target) - except exception.NotAuthorized: - if fatal: - raise - return False - - def to_policy_values(self): - policy = super(RequestContext, self).to_policy_values() - - policy['is_admin'] = self.is_admin - - return policy - - def elevated(self, read_deleted=None, overwrite=False): - """Return a version of this context with admin flag set.""" - context = self.deepcopy() - context.is_admin = True - - if 'admin' not in context.roles: - context.roles.append('admin') - - if read_deleted is not None: - context.read_deleted = read_deleted - - return context - - def deepcopy(self): - return copy.deepcopy(self) - - @property - def project_id(self): - return self.tenant - - @project_id.setter - def project_id(self, value): - self.tenant = value - - @property - def user_id(self): - return self.user - - @user_id.setter - def user_id(self, value): - self.user = value - - @property - def auth_token_info(self): - return self._auth_token_info - - -def get_admin_context(read_deleted="no"): - return RequestContext(user_id=None, - project_id=None, - is_admin=True, - read_deleted=read_deleted, - overwrite=False) diff --git a/karbor/db/__init__.py b/karbor/db/__init__.py deleted file mode 100644 index d124b1c5..00000000 --- a/karbor/db/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -DB abstraction for karbor -""" - -from karbor.db.api import * # noqa diff --git a/karbor/db/api.py b/karbor/db/api.py deleted file mode 100644 index cd29802c..00000000 --- a/karbor/db/api.py +++ /dev/null @@ -1,823 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Defines interface for DB access. - -Functions in this module are imported into the karbor.db namespace. Call these -functions from karbor.db namespace, not the karbor.db.api namespace. - -All functions in this module return objects that implement a dictionary-like -interface. Currently, many of these objects are sqlalchemy objects that -implement a dictionary interface. However, a future goal is to have all of -these objects be simple dictionaries. 
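Every public function in this DB API module follows the same one-line dispatch pattern: it forwards to IMPL, the oslo.db wrapper that lazily loads the backend named in _BACKEND_MAPPING (and, when thread pooling is enabled, proxies calls through eventlet's tpool). A condensed, self-contained sketch of the pattern; the module path and function name are illustrative, not karbor's:

    from oslo_config import cfg
    from oslo_db import concurrency as db_concurrency

    CONF = cfg.CONF
    _BACKEND_MAPPING = {'sqlalchemy': 'myservice.db.sqlalchemy.api'}
    IMPL = db_concurrency.TpoolDbapiWrapper(CONF, _BACKEND_MAPPING)

    def widget_get(context, widget_id):
        """Forward to the configured backend; it is resolved on first use."""
        return IMPL.widget_get(context, widget_id)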
- - -**Related Flags** - -:connection: string specifying the sqlalchemy connection to use, like: - `sqlite:///var/lib/karbor/karbor.sqlite`. - -:enable_new_services: when adding a new service to the database, is it in the - pool of available hardware (Default: True) - -""" - -from oslo_config import cfg -from oslo_db import concurrency as db_concurrency -from oslo_db import options as db_options - - -db_opts = [ - cfg.BoolOpt('enable_new_services', - default=True, - help='Services to be added to the available pool on create'), -] - - -CONF = cfg.CONF -CONF.register_opts(db_opts) -db_options.set_defaults(CONF) - -_BACKEND_MAPPING = {'sqlalchemy': 'karbor.db.sqlalchemy.api'} - - -IMPL = db_concurrency.TpoolDbapiWrapper(CONF, _BACKEND_MAPPING) - -# The maximum value a signed INT type may have -MAX_INT = 0x7FFFFFFF - - -################### - -def dispose_engine(): - """Force the engine to establish new connections.""" - - # FIXME(jdg): When using sqlite if we do the dispose - # we seem to lose our DB here. Adding this check - # means we don't do the dispose, but we keep our sqlite DB - # This likely isn't the best way to handle this - - if 'sqlite' not in IMPL.get_engine().name: - return IMPL.dispose_engine() - else: - return - - -################### - - -def service_destroy(context, service_id): - """Destroy the service or raise if it does not exist.""" - return IMPL.service_destroy(context, service_id) - - -def service_get(context, service_id): - """Get a service or raise if it does not exist.""" - return IMPL.service_get(context, service_id) - - -def service_get_by_host_and_topic(context, host, topic): - """Get a service by host it's on and topic it listens to.""" - return IMPL.service_get_by_host_and_topic(context, host, topic) - - -def service_get_all(context, disabled=None): - """Get all services.""" - return IMPL.service_get_all(context, disabled) - - -def service_get_all_by_topic(context, topic, disabled=None): - """Get all services for a given topic.""" - return IMPL.service_get_all_by_topic(context, topic, disabled=disabled) - - -def service_get_all_by_args(context, host, binary): - """Get all services for a given host and binary.""" - return IMPL.service_get_all_by_args(context, host, binary) - - -def service_get_by_args(context, host, binary): - """Get the state of an service by node name and binary.""" - return IMPL.service_get_by_args(context, host, binary) - - -def service_create(context, values): - """Create a service from the values dictionary.""" - return IMPL.service_create(context, values) - - -def service_update(context, service_id, values): - """Set the given properties on an service and update it. - - Raises NotFound if service does not exist. - - """ - return IMPL.service_update(context, service_id, values) - - -def get_by_id(context, model, id, *args, **kwargs): - return IMPL.get_by_id(context, model, id, *args, **kwargs) - - -################### - - -def trigger_get(context, id): - """Get a trigger by its id. - - :param context: The security context - :param id: ID of the trigger - - :returns: Dictionary-like object containing properties of the trigger - - Raises TriggerNotFound if trigger with the given ID doesn't exist. - """ - return IMPL.trigger_get(context, id) - - -def trigger_create(context, values): - """Create a trigger from the values dictionary. 
- - :param context: The security context - :param values: Dictionary containing trigger properties - - :returns: Dictionary-like object containing the properties of the created - trigger - """ - return IMPL.trigger_create(context, values) - - -def trigger_update(context, id, values): - """Set the given properties on a trigger and update it. - - :param context: The security context - :param id: ID of the trigger - :param values: Dictionary containing trigger properties to be updated - - :returns: Dictionary-like object containing the properties of the updated - trigger - - Raises TriggerNotFound if trigger with the given ID doesn't exist. - """ - return IMPL.trigger_update(context, id, values) - - -def trigger_delete(context, id): - """Delete a trigger from the database. - - :param context: The security context - :param id: ID of the trigger - - Raises TriggerNotFound if trigger with the given ID doesn't exist. - """ - return IMPL.trigger_delete(context, id) - - -def trigger_get_all_by_filters_sort(context, filters, limit=None, - marker=None, sort_keys=None, - sort_dirs=None): - """Get all triggers that match all filters sorted by multiple keys. - - sort_keys and sort_dirs must be a list of strings. - """ - return IMPL.trigger_get_all_by_filters_sort( - context, filters, limit=limit, marker=marker, - sort_keys=sort_keys, sort_dirs=sort_dirs) - - -################### - - -def trigger_execution_create(context, trigger_id, time): - return IMPL.trigger_execution_create(context, trigger_id, time) - - -def trigger_execution_get_next(context): - return IMPL.trigger_execution_get_next(context) - - -def trigger_execution_delete(context, id, trigger_id): - return IMPL.trigger_execution_delete(context, id, trigger_id) - - -def trigger_execution_update(context, id, current_time, new_time): - return IMPL.trigger_execution_update(context, id, current_time, new_time) - - -################### - - -def scheduled_operation_get(context, id, columns_to_join=[]): - """Get a scheduled operation by its id. - - :param context: The security context - :param id: ID of the scheduled operation - :param columns_to_join: columns which will be joined - - :returns: Dictionary-like object containing properties of the scheduled - operation - - Raises ScheduledOperationNotFound if scheduled operation with - the given ID doesn't exist. - """ - return IMPL.scheduled_operation_get(context, id, columns_to_join) - - -def scheduled_operation_create(context, values): - """Create a scheduled operation from the values dictionary. - - :param context: The security context - :param values: Dictionary containing scheduled operation properties - - :returns: Dictionary-like object containing the properties of the created - scheduled operation - """ - return IMPL.scheduled_operation_create(context, values) - - -def scheduled_operation_update(context, id, values): - """Set the given properties on a scheduled operation and update it. - - :param context: The security context - :param id: ID of the scheduled operation - :param values: Dictionary containing scheduled operation properties - to be updated - - :returns: Dictionary-like object containing the properties of the updated - scheduled operation - - Raises ScheduledOperationNotFound if scheduled operation with - the given ID doesn't exist. - """ - return IMPL.scheduled_operation_update(context, id, values) - - -def scheduled_operation_delete(context, id): - """Delete a scheduled operation from the database. 
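Several of the getters in this area accept a columns_to_join list to request eager loading of related rows. An illustrative read of a scheduled operation together with its trigger; 'trigger' is the join honoured by the sqlalchemy backend for this call, and operation_id is a placeholder:

    from karbor import db

    operation = db.scheduled_operation_get(
        context, operation_id, columns_to_join=['trigger'])
    # The joined trigger row is reachable without a second query.
    trigger_type = operation['trigger']['type']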
- - :param context: The security context - :param id: ID of the scheduled operation - - Raises ScheduledOperationNotFound if scheduled operation with - the given ID doesn't exist. - """ - return IMPL.scheduled_operation_delete(context, id) - - -def scheduled_operation_get_all_by_filters_sort( - context, filters, limit=None, - marker=None, sort_keys=None, sort_dirs=None): - """Get all operations that match all filters sorted by multiple keys. - - sort_keys and sort_dirs must be a list of strings. - """ - return IMPL.scheduled_operation_get_all_by_filters_sort( - context, filters, limit=limit, marker=marker, - sort_keys=sort_keys, sort_dirs=sort_dirs) - - -################### - - -def scheduled_operation_state_get(context, operation_id, columns_to_join=[]): - """Get a scheduled operation state by its id. - - :param context: The security context - :param operation_id: Operation_id of the scheduled operation state - :columns_to_join: columns which will be joined - - :returns: Dictionary-like object containing properties of the scheduled - operation state - - Raises ScheduledOperationStateNotFound if scheduled operation state with - the given ID doesn't exist. - """ - return IMPL.scheduled_operation_state_get(context, operation_id, - columns_to_join) - - -def scheduled_operation_state_create(context, values): - """Create a scheduled operation state from the values dictionary. - - :param context: The security context - :param values: Dictionary containing scheduled operation state properties - - :returns: Dictionary-like object containing the properties of the created - scheduled operation state - """ - return IMPL.scheduled_operation_state_create(context, values) - - -def scheduled_operation_state_update(context, operation_id, values): - """Set the given properties on a scheduled operation state and update it. - - :param context: The security context - :param operation_id: Operation_id of the scheduled operation state - :param values: Dictionary containing scheduled operation state properties - to be updated - - :returns: Dictionary-like object containing the properties of the updated - scheduled operation state - - Raises ScheduledOperationStateNotFound if scheduled operation state with - the given ID doesn't exist. - """ - return IMPL.scheduled_operation_state_update(context, operation_id, values) - - -def scheduled_operation_state_delete(context, operation_id): - """Delete a scheduled operation state from the database. - - :param context: The security context - :param operation_id: Operation_id of the scheduled operation state - - Raises ScheduledOperationStateNotFound if scheduled operation state with - the given ID doesn't exist. - """ - return IMPL.scheduled_operation_state_delete(context, operation_id) - - -def scheduled_operation_state_get_all_by_filters_sort( - context, filters, limit=None, marker=None, - sort_keys=None, sort_dirs=None, columns_to_join=[]): - """Get all operation states that match all filters sorted by multiple keys. - - sort_keys and sort_dirs must be a list of strings. - """ - return IMPL.scheduled_operation_state_get_all_by_filters_sort( - context, filters, limit=limit, marker=marker, sort_keys=sort_keys, - sort_dirs=sort_dirs, columns_to_join=columns_to_join) - - -################### - - -def scheduled_operation_log_get(context, log_id): - """Get a scheduled operation log by its id. 
- - :param context: The security context - :param log_id: Log_id of the scheduled operation log - - :returns: Dictionary-like object containing properties of the scheduled - operation log - - Raises ScheduledOperationLogNotFound if scheduled operation log with - the given ID doesn't exist. - """ - return IMPL.scheduled_operation_log_get(context, log_id) - - -def scheduled_operation_log_create(context, values): - """Create a scheduled operation log from the values dictionary. - - :param context: The security context - :param values: Dictionary containing scheduled operation log properties - - :returns: Dictionary-like object containing the properties of the created - scheduled operation log - """ - return IMPL.scheduled_operation_log_create(context, values) - - -def scheduled_operation_log_update(context, log_id, values): - """Set the given properties on a scheduled operation log and update it. - - :param context: The security context - :param log_id: Log_id of the scheduled operation log - :param values: Dictionary containing scheduled operation log properties - to be updated - - :returns: Dictionary-like object containing the properties of the updated - scheduled operation log - - Raises ScheduledOperationLogNotFound if scheduled operation log with - the given ID doesn't exist. - """ - return IMPL.scheduled_operation_log_update(context, log_id, values) - - -def scheduled_operation_log_delete(context, log_id): - """Delete a scheduled operation log from the database. - - :param context: The security context - :param log_id: Log_id of the scheduled operation log - - Raises ScheduledOperationLogNotFound if scheduled operation log with - the given ID doesn't exist. - """ - return IMPL.scheduled_operation_log_delete(context, log_id) - - -def scheduled_operation_log_delete_oldest(context, operation_id, - retained_num, excepted_states=[]): - """Delete the oldest scheduled operation logs from the database. - - :param context: The security context - :param operation_id: ID of the scheduled operation - :param retained_num: The number of retained logs - :param excepted_states: If the state of log is in excepted_states, - it will not be deleted. - """ - return IMPL.scheduled_operation_log_delete_oldest(context, operation_id, - retained_num, - excepted_states) - - -def scheduled_operation_log_get_all_by_filters_sort( - context, filters, limit=None, marker=None, - sort_keys=None, sort_dirs=None): - """Get all operation logs that match all filters sorted by multiple keys. - - sort_keys and sort_dirs must be a list of strings. - """ - return IMPL.scheduled_operation_log_get_all_by_filters_sort( - context, filters, limit=limit, marker=marker, sort_keys=sort_keys, - sort_dirs=sort_dirs) - - -################### - - -def plan_get(context, plan_id): - """Get a plan or raise if it does not exist.""" - return IMPL.plan_get(context, plan_id) - - -def plan_create(context, values): - """Create a plan from the values dictionary.""" - return IMPL.plan_create(context, values) - - -def plan_update(context, plan_id, values): - """Set the given properties on a plan and update it. - - Raises NotFound if plan does not exist. 
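The plan listing call below takes the usual marker/limit pagination pair plus multi-key sorting. A hedged paging sketch built on that signature, treating the id of the last returned row as the next marker (the common oslo-style convention; page size and sort keys are arbitrary):

    from karbor import db

    def iter_all_plans(context, page_size=100):
        marker = None
        while True:
            page = db.plan_get_all(context, marker, page_size,
                                   sort_keys=['created_at'],
                                   sort_dirs=['desc'])
            if not page:
                return
            for plan in page:
                yield plan
            marker = page[-1]['id']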
- - """ - return IMPL.plan_update(context, plan_id, values) - - -def plan_resources_update(context, plan_id, resources): - """Update resources if it exists, otherwise create it.""" - return IMPL.plan_resources_update(context, plan_id, resources) - - -def plan_destroy(context, plan_id): - """Destroy the plan or raise if it does not exist.""" - return IMPL.plan_destroy(context, plan_id) - - -def plan_get_all(context, marker, limit, sort_keys=None, sort_dirs=None, - filters=None, offset=None): - """Get all plans.""" - return IMPL.plan_get_all(context, marker, limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, filters=filters, - offset=offset) - - -def plan_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, filters=None, - offset=None): - """Get all plans belonging to a project.""" - return IMPL.plan_get_all_by_project(context, project_id, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - - -def restore_get(context, restore_id): - """Get a restore or raise if it does not exist.""" - return IMPL.restore_get(context, restore_id) - - -def restore_create(context, values): - """Create a restore from the values dictionary.""" - return IMPL.restore_create(context, values) - - -def restore_update(context, restore_id, values): - """Set the given properties on a restore and update it. - - Raises NotFound if plan does not exist. - - """ - return IMPL.restore_update(context, restore_id, values) - - -def restore_destroy(context, restore_id): - """Destroy the restore or raise if it does not exist.""" - return IMPL.restore_destroy(context, restore_id) - - -def restore_get_all(context, marker, limit, sort_keys=None, sort_dirs=None, - filters=None, offset=None): - """Get all restores.""" - return IMPL.restore_get_all(context, marker, limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, filters=filters, - offset=offset) - - -def restore_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, filters=None, - offset=None): - """Get all restores belonging to a project.""" - return IMPL.restore_get_all_by_project(context, project_id, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - - -def verification_get(context, verification_id): - """Get a verification or raise if it does not exist.""" - return IMPL.verification_get(context, verification_id) - - -def verification_create(context, values): - """Create a verification from the values dictionary.""" - return IMPL.verification_create(context, values) - - -def verification_update(context, verification_id, values): - """Set the given properties on a verification and update it. - - Raises NotFound if verification does not exist. 
- - """ - return IMPL.verification_update(context, verification_id, - values) - - -def verification_destroy(context, verification_id): - """Destroy the verification or raise if it does not exist.""" - return IMPL.verification_destroy(context, verification_id) - - -def verification_get_all(context, marker, limit, sort_keys=None, - sort_dirs=None, filters=None, offset=None): - """Get all verifications.""" - return IMPL.verification_get_all( - context, marker, limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, filters=filters, offset=offset) - - -def verification_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, offset=None): - """Get all verifications belonging to a project.""" - return IMPL.verification_get_all_by_project( - context, project_id, marker, limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, filters=filters, offset=offset) - - -def operation_log_get(context, operation_log_id): - """Get a operation log or raise if it does not exist.""" - return IMPL.operation_log_get(context, operation_log_id) - - -def operation_log_create(context, values): - """Create a operation log from the values dictionary.""" - return IMPL.operation_log_create(context, values) - - -def operation_log_update(context, operation_log_id, values): - """Set the given properties on a operation log and update it. - - Raises NotFound if plan does not exist. - - """ - return IMPL.operation_log_update(context, operation_log_id, values) - - -def operation_log_destroy(context, operation_log_id): - """Destroy the operation log or raise if it does not exist.""" - return IMPL.operation_log_destroy(context, operation_log_id) - - -def operation_log_get_all(context, marker, limit, sort_keys=None, - sort_dirs=None, - filters=None, offset=None): - """Get all operation logs.""" - return IMPL.operation_log_get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - - -def operation_log_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - offset=None): - """Get all operation logs belonging to a project.""" - return IMPL.operation_log_get_all_by_project(context, project_id, - marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - - -################### - - -def checkpoint_record_get(context, checkpoint_record_id): - """Get a checkpoint record or raise if it does not exist.""" - return IMPL.checkpoint_record_get(context, checkpoint_record_id) - - -def checkpoint_record_create(context, values): - """Create a checkpoint record from the values dictionary.""" - return IMPL.checkpoint_record_create(context, values) - - -def checkpoint_record_update(context, checkpoint_record_id, values): - """Set the given properties on a checkpoint record and update it. - - Raises NotFound if checkpoint record does not exist. - - """ - return IMPL.checkpoint_record_update(context, checkpoint_record_id, values) - - -def checkpoint_record_destroy(context, checkpoint_record_id): - """Destroy the checkpoint record or raise if it does not exist.""" - return IMPL.checkpoint_record_destroy(context, checkpoint_record_id) - - -def checkpoint_record_get_all_by_filters_sort( - context, filters, limit=None, - marker=None, sort_keys=None, sort_dirs=None): - """Get all checkpoint records that match all filters sorted - - by multiple keys. sort_keys and sort_dirs must be a list of strings. 
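The checkpoint-record listing call here shares the filters/sort contract used throughout this module. An illustrative invocation fetching one project's records newest first; the project_id filter key is an assumption based on the column names used elsewhere in this file:

    from karbor import db

    records = db.checkpoint_record_get_all_by_filters_sort(
        context,
        filters={'project_id': context.project_id},
        limit=50,
        sort_keys=['created_at'],
        sort_dirs=['desc'])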
- - """ - return IMPL.checkpoint_record_get_all_by_filters_sort( - context, filters, limit=limit, marker=marker, - sort_keys=sort_keys, sort_dirs=sort_dirs) - - -def purge_deleted_rows(context, age_in_days): - """Purge deleted rows older than given age from karbor tables - - Raises InvalidParameterValue if age_in_days is incorrect. - :returns: number of deleted rows - """ - return IMPL.purge_deleted_rows(context, age_in_days=age_in_days) - - -#################### - - -def quota_create(context, project_id, resource, limit): - """Create a quota for the given project and resource.""" - return IMPL.quota_create(context, project_id, resource, limit) - - -def quota_get(context, project_id, resource): - """Retrieve a quota or raise if it does not exist.""" - return IMPL.quota_get(context, project_id, resource) - - -def quota_get_all_by_project(context, project_id): - """Retrieve all quotas associated with a given project.""" - return IMPL.quota_get_all_by_project(context, project_id) - - -def quota_update(context, project_id, resource, limit): - """Update a quota or raise if it does not exist.""" - return IMPL.quota_update(context, project_id, resource, limit) - - -def quota_destroy(context, project_id, resource): - """Destroy the quota or raise if it does not exist.""" - return IMPL.quota_destroy(context, project_id, resource) - - -################### - - -def quota_class_create(context, class_name, resource, limit): - """Create a quota class for the given name and resource.""" - return IMPL.quota_class_create(context, class_name, resource, limit) - - -def quota_class_get(context, class_name, resource): - """Retrieve a quota class or raise if it does not exist.""" - return IMPL.quota_class_get(context, class_name, resource) - - -def quota_class_get_all_by_name(context, class_name): - """Retrieve all quotas associated with a given quota class.""" - return IMPL.quota_class_get_all_by_name(context, class_name) - - -def quota_class_update(context, class_name, resource, limit): - """Update a quota class or raise if it does not exist.""" - return IMPL.quota_class_update(context, class_name, resource, limit) - - -def quota_class_destroy(context, class_name, resource): - """Destroy the quota class or raise if it does not exist.""" - return IMPL.quota_class_destroy(context, class_name, resource) - - -def quota_class_destroy_all_by_name(context, class_name): - """Destroy all quotas associated with a given quota class.""" - return IMPL.quota_class_destroy_all_by_name(context, class_name) - - -################### - - -def quota_usage_create(context, project_id, resource, in_use, reserved, - until_refresh): - """Create a quota usage for the given project and resource.""" - return IMPL.quota_usage_create(context, project_id, resource, - in_use, reserved, until_refresh) - - -def quota_usage_get(context, project_id, resource): - """Retrieve a quota usage or raise if it does not exist.""" - return IMPL.quota_usage_get(context, project_id, resource) - - -def quota_usage_get_all_by_project(context, project_id): - """Retrieve all usage associated with a given resource.""" - return IMPL.quota_usage_get_all_by_project(context, project_id) - - -################### - - -def reservation_create(context, uuid, usage, project_id, resource, delta, - expire): - """Create a reservation for the given project and resource.""" - return IMPL.reservation_create(context, uuid, usage, project_id, - resource, delta, expire) - - -def reservation_get(context, uuid): - """Retrieve a reservation or raise if it does not exist.""" - return 
IMPL.reservation_get(context, uuid) - - -def reservation_get_all_by_project(context, project_id): - """Retrieve all reservations associated with a given project.""" - return IMPL.reservation_get_all_by_project(context, project_id) - - -def reservation_destroy(context, uuid): - """Destroy the reservation or raise if it does not exist.""" - return IMPL.reservation_destroy(context, uuid) - - -################### - - -def quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age, project_id=None): - """Check quotas and create appropriate reservations.""" - return IMPL.quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age, project_id=project_id) - - -def reservation_commit(context, reservations, project_id=None): - """Commit quota reservations.""" - return IMPL.reservation_commit(context, reservations, - project_id=project_id) - - -def reservation_rollback(context, reservations, project_id=None): - """Roll back quota reservations.""" - return IMPL.reservation_rollback(context, reservations, - project_id=project_id) - - -def quota_destroy_all_by_project(context, project_id): - """Destroy all quotas associated with a given project.""" - return IMPL.quota_destroy_all_by_project(context, project_id) - - -def reservation_expire(context): - """Roll back any expired reservations.""" - return IMPL.reservation_expire(context) - - -################### - - -def authorize_project_context(context, project_id): - """Ensures a request has permission to access the given project.""" - return IMPL.authorize_project_context(context, project_id) diff --git a/karbor/db/base.py b/karbor/db/base.py deleted file mode 100644 index 6aa9b6e5..00000000 --- a/karbor/db/base.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Base class for classes that need modular database access.""" - - -from oslo_config import cfg -from oslo_utils import importutils - - -db_driver_opt = cfg.StrOpt('db_driver', - default='karbor.db', - help='Driver to use for database access') - -CONF = cfg.CONF -CONF.register_opt(db_driver_opt) - - -class Base(object): - """DB driver is injected in the init method.""" - - def __init__(self, db_driver=None): - # NOTE(mriedem): Without this call, multiple inheritance involving - # the db Base class does not work correctly. - super(Base, self).__init__() - if not db_driver: - db_driver = CONF.db_driver - self.db = importutils.import_module(db_driver) # pylint: disable=C0103 - self.db.dispose_engine() diff --git a/karbor/db/migration.py b/karbor/db/migration.py deleted file mode 100644 index 49dcd004..00000000 --- a/karbor/db/migration.py +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Database setup and migration commands.""" - -import os -import threading - -from oslo_config import cfg -from oslo_db import options -from stevedore import driver - -from karbor.db.sqlalchemy import api as db_api - -INIT_VERSION = 000 - -_IMPL = None -_LOCK = threading.Lock() - -options.set_defaults(cfg.CONF) - -MIGRATE_REPO_PATH = os.path.join( - os.path.abspath(os.path.dirname(__file__)), - 'sqlalchemy', - 'migrate_repo', -) - - -def get_backend(): - global _IMPL - if _IMPL is None: - with _LOCK: - if _IMPL is None: - _IMPL = driver.DriverManager( - "karbor.database.migration_backend", - cfg.CONF.database.backend).driver - return _IMPL - - -def db_sync(version=None, init_version=INIT_VERSION, engine=None): - """Migrate the database to `version` or the most recent version.""" - - if engine is None: - engine = db_api.get_engine() - return get_backend().db_sync(engine=engine, - abs_path=MIGRATE_REPO_PATH, - version=version, - init_version=init_version) diff --git a/karbor/db/sqlalchemy/__init__.py b/karbor/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/db/sqlalchemy/api.py b/karbor/db/sqlalchemy/api.py deleted file mode 100644 index 8c198cac..00000000 --- a/karbor/db/sqlalchemy/api.py +++ /dev/null @@ -1,2473 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
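The migration helper above resolves its backend through stevedore and exposes a single entry point. A typical invocation runs against the default engine and upgrades to the latest schema; passing version pins the schema to a specific revision instead:

    from karbor.db import migration

    migration.db_sync()              # upgrade to the most recent version
    # migration.db_sync(version=5)   # or migrate to a specific revision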
- -"""Implementation of SQLAlchemy backend.""" - -import datetime as dt -import functools -import re -import six -import sys -import threading -import time -import uuid - -from oslo_config import cfg -from oslo_db import api as oslo_db_api -from oslo_db import exception as db_exc -from oslo_db import options -from oslo_db.sqlalchemy import session as db_session -from oslo_db.sqlalchemy import utils as sqlalchemyutils -from oslo_log import log as logging -from oslo_utils import timeutils -from oslo_utils import uuidutils -from sqlalchemy import MetaData -from sqlalchemy.orm import joinedload -from sqlalchemy.schema import Table -from sqlalchemy.sql import expression -from sqlalchemy.sql.expression import literal_column -from sqlalchemy.sql import func - -from karbor.db.sqlalchemy import models -from karbor import exception -from karbor.i18n import _ - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -options.set_defaults(CONF, connection='sqlite:///$state_path/karbor.sqlite') - -_LOCK = threading.Lock() -_FACADE = None -_GET_METHODS = {} - - -def _create_facade_lazily(): - global _LOCK - with _LOCK: - global _FACADE - if _FACADE is None: - _FACADE = db_session.EngineFacade( - CONF.database.connection, - **dict(CONF.database) - ) - - return _FACADE - - -def get_engine(): - facade = _create_facade_lazily() - return facade.get_engine() - - -def get_session(**kwargs): - facade = _create_facade_lazily() - return facade.get_session(**kwargs) - - -def dispose_engine(): - get_engine().dispose() - - -_DEFAULT_QUOTA_NAME = 'default' - - -def get_backend(): - """The backend is this module itself.""" - - return sys.modules[__name__] - - -def is_admin_context(context): - """Indicates if the request context is an administrator.""" - if not context: - LOG.warning('Use of empty request context is deprecated', - DeprecationWarning) - raise Exception('die') - return context.is_admin - - -def is_user_context(context): - """Indicates if the request context is a normal user.""" - if not context: - return False - if context.is_admin: - return False - if not context.user_id or not context.project_id: - return False - return True - - -def authorize_project_context(context, project_id): - """Ensures a request has permission to access the given project.""" - if is_user_context(context): - if not context.project_id: - raise exception.NotAuthorized() - elif context.project_id != project_id: - raise exception.NotAuthorized() - - -def authorize_user_context(context, user_id): - """Ensures a request has permission to access the given user.""" - if is_user_context(context): - if not context.user_id: - raise exception.NotAuthorized() - elif context.user_id != user_id: - raise exception.NotAuthorized() - - -def require_admin_context(f): - """Decorator to require admin request context. - - The first argument to the wrapped function must be the context. - - """ - - def wrapper(*args, **kwargs): - if not is_admin_context(args[0]): - raise exception.AdminRequired() - return f(*args, **kwargs) - return wrapper - - -def require_context(f): - """Decorator to require *any* user or admin context. - - This does no authorization for user or project access matching, see - :py:func:`authorize_project_context` and - :py:func:`authorize_user_context`. - - The first argument to the wrapped function must be the context. 
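The require_admin_context and require_context decorators defined here wrap the DB entry points so that authorization failures surface before any query runs. A compact sketch of how such a guard composes with the module's model_query helper; models.Widget and widget_get are hypothetical stand-ins, not karbor code:

    @require_context
    def widget_get(context, widget_id):
        # model_query already honours context.read_deleted; the
        # decorator only vets who is asking.
        return model_query(context, models.Widget).filter_by(
            id=widget_id).first()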
- - """ - - def wrapper(*args, **kwargs): - if not is_admin_context(args[0]) and not is_user_context(args[0]): - raise exception.NotAuthorized() - return f(*args, **kwargs) - return wrapper - - -def require_plan_exists(f): - """Decorator to require the specified plan to exist. - - Requires the wrapped function to use context and plan_id as - their first two arguments. - """ - @functools.wraps(f) - def wrapper(context, plan_id, *args, **kwargs): - plan_get(context, plan_id) - return f(context, plan_id, *args, **kwargs) - return wrapper - - -def _retry_on_deadlock(f): - """Decorator to retry a DB API call if Deadlock was received.""" - @functools.wraps(f) - def wrapped(*args, **kwargs): - while True: - try: - return f(*args, **kwargs) - except db_exc.DBDeadlock: - LOG.warning("Deadlock detected when running '%(func_name)s':" - " Retrying...", dict(func_name=f.__name__)) - # Retry! - time.sleep(0.5) - continue - functools.update_wrapper(wrapped, f) - return wrapped - - -def model_query(context, *args, **kwargs): - """Query helper that accounts for context's `read_deleted` field. - - :param context: context to query under - :param session: if present, the session to use - :param read_deleted: if present, overrides context's read_deleted field. - :param project_only: if present and context is user-type, then restrict - query to match the context's project_id. - """ - session = kwargs.get('session') or get_session() - read_deleted = kwargs.get('read_deleted') or context.read_deleted - project_only = kwargs.get('project_only') - - query = session.query(*args) - if read_deleted == 'no': - query = query.filter_by(deleted=False) - elif read_deleted == 'yes': - pass # omit the filter to include deleted and active - elif read_deleted == 'only': - query = query.filter_by(deleted=True) - else: - raise Exception( - _("Unrecognized read_deleted value '%s'") % read_deleted) - - if project_only and is_user_context(context): - query = query.filter_by(project_id=context.project_id) - - return query - - -@require_admin_context -def service_destroy(context, service_id): - session = get_session() - with session.begin(): - service_ref = _service_get(context, service_id, session=session) - service_ref.delete(session=session) - - -@require_admin_context -def _service_get(context, service_id, session=None): - result = model_query( - context, - models.Service, - session=session - ).filter_by(id=service_id).first() - if not result: - raise exception.ServiceNotFound(service_id=service_id) - - return result - - -@require_admin_context -def service_get(context, service_id): - return _service_get(context, service_id) - - -@require_admin_context -def service_get_all(context, disabled=None): - query = model_query(context, models.Service) - - if disabled is not None: - query = query.filter_by(disabled=disabled) - - return query.all() - - -@require_admin_context -def service_get_all_by_args(context, host, binary): - results = model_query( - context, - models.Service - ) - if host is not None: - results = results.filter_by(host=host) - if binary is not None: - results = results.filter_by(binary=binary) - - return results.all() - - -@require_admin_context -def service_get_all_by_topic(context, topic, disabled=None): - query = model_query( - context, - models.Service, - read_deleted="no" - ).filter_by(topic=topic) - - if disabled is not None: - query = query.filter_by(disabled=disabled) - - return query.all() - - -@require_admin_context -def service_get_by_host_and_topic(context, host, topic): - result = model_query( - context, 
- models.Service, - read_deleted="no" - ).filter_by( - disabled=False - ).filter_by( - host=host - ).filter_by( - topic=topic - ).first() - if not result: - raise exception.ServiceNotFound(service_id=None) - return result - - -@require_admin_context -def _service_get_all_topic_subquery(context, session, topic, subq, label): - sort_value = getattr(subq.c, label) - return model_query( - context, - models.Service, - func.coalesce(sort_value, 0), - session=session, - read_deleted="no" - ).filter_by( - topic=topic - ).filter_by( - disabled=False - ).outerjoin( - (subq, models.Service.host == subq.c.host) - ).order_by( - sort_value - ).all() - - -@require_admin_context -def service_get_by_args(context, host, binary): - results = model_query( - context, - models.Service - ).filter_by( - host=host - ).filter_by( - binary=binary - ).all() - - for result in results: - if host == result['host']: - return result - - raise exception.HostBinaryNotFound(host=host, binary=binary) - - -@require_admin_context -def service_create(context, values): - service_ref = models.Service() - service_ref.update(values) - if not CONF.enable_new_services: - service_ref.disabled = True - - session = get_session() - with session.begin(): - service_ref.save(session) - return service_ref - - -@require_admin_context -def service_update(context, service_id, values): - session = get_session() - with session.begin(): - service_ref = _service_get(context, service_id, session=session) - if 'disabled' in values: - service_ref['modified_at'] = timeutils.utcnow() - service_ref['updated_at'] = literal_column('updated_at') - service_ref.update(values) - return service_ref - - -def _get_get_method(model): - # General conversion - # Convert camel cased model name to snake format - s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', model.__name__) - # Get method must be snake formatted model name concatenated with _get - method_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() + '_get' - return globals().get(method_name) - - -@require_context -def get_by_id(context, model, id, *args, **kwargs): - # Add get method to cache dictionary if it's not already there - if not _GET_METHODS.get(model): - _GET_METHODS[model] = _get_get_method(model) - - return _GET_METHODS[model](context, id, *args, **kwargs) - - -################### - - -def trigger_get(context, id): - return _trigger_get(context, id) - - -def _trigger_get(context, id, session=None): - result = model_query(context, models.Trigger, - session=session).filter_by(id=id) - result = result.first() - - if not result: - raise exception.TriggerNotFound(id=id) - - return result - - -def trigger_create(context, values): - if not values.get('id'): - values['id'] = uuidutils.generate_uuid() - - trigger_ref = models.Trigger() - trigger_ref.update(values) - trigger_ref.save(get_session()) - return trigger_ref - - -@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -def trigger_update(context, id, values): - """Update the Trigger record with the most recent data.""" - - session = get_session() - with session.begin(): - trigger_ref = _trigger_get(context, id, session=session) - trigger_ref.update(values) - trigger_ref.save(session) - return trigger_ref - - -def trigger_delete(context, id): - """Delete a Trigger record.""" - - session = get_session() - with session.begin(): - trigger_ref = _trigger_get(context, id, session=session) - trigger_ref.delete(session=session) - - -def _trigger_list_query(context, session, **kwargs): - return model_query(context, models.Trigger, session=session) - 
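The *_list_process_filters helpers that follow split the caller's filters dict into two groups: exact-match columns applied with equality, and free-text columns applied as a pattern match. A condensed, generic illustration of that split (the column names and the LIKE-based matching are illustrative, not the literal helper implementations):

    def apply_filters(model, query, filters):
        for key in ('project_id', 'type'):        # exact-match columns
            if key in filters:
                query = query.filter(getattr(model, key) == filters[key])
        for key in ('name',):                     # pattern-match columns
            if key in filters:
                query = query.filter(
                    getattr(model, key).like('%' + filters[key] + '%'))
        return query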
- -def _trigger_list_process_filters(query, filters): - exact_match_filter_names = ['project_id', 'type'] - query = _list_common_process_exact_filter(models.Trigger, query, filters, - exact_match_filter_names) - - regex_match_filter_names = ['name', 'properties'] - query = _list_common_process_regex_filter(models.Trigger, query, filters, - regex_match_filter_names) - - return query - - -def trigger_get_all_by_filters_sort(context, filters, limit=None, marker=None, - sort_keys=None, sort_dirs=None): - session = get_session() - with session.begin(): - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, - paginate_type=models.Trigger, - use_model=True) - - return query.all() if query else [] - - -################### - - -def _trigger_execution_list_query(context, session, **kwargs): - return model_query(context, models.TriggerExecution, session=session) - - -def _trigger_execution_list_process_filters(query, filters): - exact_match_filter_names = ['id', 'trigger_id', 'execution_time'] - query = _list_common_process_exact_filter(models.Trigger, query, filters, - exact_match_filter_names) - return query - - -def _trigger_execution_get(context, id, session=None): - result = model_query(context, models.TriggerExecution, - session=session).filter_by(id=id) - result = result.first() - - if not result: - raise exception.TriggerNotFound(id=id) - - return result - - -def trigger_execution_update(context, id, old_time, new_time): - session = get_session() - try: - with session.begin(): - result = model_query( - context, models.TriggerExecution, session=session - ).filter_by( - id=id, execution_time=old_time - ).update({"execution_time": new_time}) - except Exception as e: - LOG.warning("Unable to update trigger execution (%(execution)s): " - "%(exc)s", - {"execution": id, "exc": e}) - return False - else: - LOG.debug("Updated trigger execution (%(execution)s) from %(old_time)s" - " to %(new_time)s", - {"execution": id, "old_time": old_time, "new_time": new_time} - ) - return result == 1 - - -def trigger_execution_create(context, trigger_id, time): - trigger_ex_ref = models.TriggerExecution() - trigger_ex_ref.update({ - 'id': uuidutils.generate_uuid(), - 'trigger_id': trigger_id, - 'execution_time': time, - }) - trigger_ex_ref.save(get_session()) - return trigger_ex_ref - - -def trigger_execution_delete(context, id, trigger_id): - filters = {} - if id: - filters['id'] = id - if trigger_id: - filters['trigger_id'] = trigger_id - - session = get_session() - try: - with session.begin(): - deleted = model_query( - context, models.TriggerExecution, session=session - ).filter_by(**filters).delete() - except Exception as e: - LOG.warning("Unable to delete trigger (%(trigger)s) execution " - "(%(execution)s): %(exc)s", - {"trigger": trigger_id, "execution": id, "exc": e}) - return False - else: - LOG.debug("Deleted trigger (%(trigger)s) execution (%(execution)s)", - {"trigger": trigger_id, "execution": id}) - return deleted == 1 - - -def trigger_execution_get_next(context): - session = get_session() - try: - with session.begin(): - query = _generate_paginate_query( - context, session, - marker=None, - limit=1, - sort_keys=('execution_time', ), - sort_dirs=('asc', ), - filters=None, - paginate_type=models.TriggerExecution, - ) - result = query.first() - except Exception as e: - LOG.warning("Unable to get next trigger execution %s", e) - return None - else: - return result - - -################### - - -def scheduled_operation_get(context, id, columns_to_join=[]): - 
return _scheduled_operation_get(context, id, - columns_to_join=columns_to_join) - - -def _scheduled_operation_get(context, id, columns_to_join=[], session=None): - query = model_query(context, models.ScheduledOperation, - session=session).filter_by(id=id) - - if columns_to_join and 'trigger' in columns_to_join: - query = query.options(joinedload('trigger')) - - result = query.first() - if not result: - raise exception.ScheduledOperationNotFound(id=id) - - return result - - -def scheduled_operation_create(context, values): - if not values.get('id'): - values['id'] = uuidutils.generate_uuid() - - operation_ref = models.ScheduledOperation() - operation_ref.update(values) - operation_ref.save(get_session()) - return operation_ref - - -@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -def scheduled_operation_update(context, id, values): - """Update the ScheduledOperation record with the most recent data.""" - - session = get_session() - with session.begin(): - operation_ref = _scheduled_operation_get(context, id, - session=session) - operation_ref.update(values) - operation_ref.save(session) - return operation_ref - - -def scheduled_operation_delete(context, id): - """Delete a ScheduledOperation record.""" - - session = get_session() - with session.begin(): - operation_ref = _scheduled_operation_get(context, id, - session=session) - session.delete(operation_ref) - session.flush() - - -def _scheduled_operation_list_query(context, session, **kwargs): - return model_query(context, models.ScheduledOperation, session=session) - - -def _scheduled_operation_list_process_filters(query, filters): - exact_match_filter_names = ['project_id', 'operation_type', 'trigger_id'] - query = _list_common_process_exact_filter( - models.ScheduledOperation, query, filters, - exact_match_filter_names) - - regex_match_filter_names = ['name', 'operation_definition'] - query = _list_common_process_regex_filter( - models.ScheduledOperation, query, filters, - regex_match_filter_names) - - return query - - -def scheduled_operation_get_all_by_filters_sort( - context, filters, limit=None, marker=None, - sort_keys=None, sort_dirs=None): - - session = get_session() - with session.begin(): - query = _generate_paginate_query( - context, session, marker, limit, - sort_keys, sort_dirs, filters, - paginate_type=models.ScheduledOperation, - use_model=True) - - return query.all() if query else [] - - -################### - - -def scheduled_operation_state_get(context, operation_id, columns_to_join=[]): - return _scheduled_operation_state_get(context, operation_id, - columns_to_join=columns_to_join) - - -def _scheduled_operation_state_get(context, operation_id, - columns_to_join=[], session=None): - query = model_query(context, models.ScheduledOperationState, - session=session).filter_by(operation_id=operation_id) - - if columns_to_join and 'operation' in columns_to_join: - query = query.options(joinedload('operation')) - - result = query.first() - if not result: - raise exception.ScheduledOperationStateNotFound(op_id=operation_id) - return result - - -def scheduled_operation_state_create(context, values): - state_ref = models.ScheduledOperationState() - state_ref.update(values) - state_ref.save(get_session()) - return state_ref - - -@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -def scheduled_operation_state_update(context, operation_id, values): - """Update the ScheduledOperationState record with the most recent data.""" - - session = get_session() - with session.begin(): - state_ref = 
_scheduled_operation_state_get(context, operation_id, - session=session) - state_ref.update(values) - state_ref.save(session) - return state_ref - - -def scheduled_operation_state_delete(context, operation_id): - """Delete a ScheduledOperationState record.""" - - session = get_session() - with session.begin(): - state_ref = _scheduled_operation_state_get(context, operation_id, - session=session) - session.delete(state_ref) - session.flush() - - -def _scheduled_operation_state_list_query(context, session, **kwargs): - query = model_query(context, models.ScheduledOperationState, - session=session) - - valid_columns = ['operation'] - columns_to_join = kwargs.get('columns_to_join', []) - for column in columns_to_join: - if column in valid_columns: - query = query.options(joinedload(column)) - - return query - - -def _scheduled_operation_state_list_process_filters(query, filters): - exact_match_filter_names = ['service_id', 'state'] - query = _list_common_process_exact_filter( - models.ScheduledOperationState, query, filters, - exact_match_filter_names) - - return query - - -def scheduled_operation_state_get_all_by_filters_sort( - context, filters, limit=None, marker=None, - sort_keys=None, sort_dirs=None, columns_to_join=[]): - - session = get_session() - with session.begin(): - query = _generate_paginate_query( - context, session, marker, limit, - sort_keys, sort_dirs, filters, - paginate_type=models.ScheduledOperationState, - use_model=True, columns_to_join=columns_to_join) - - return query.all() if query else [] - - -################### - - -def scheduled_operation_log_get(context, log_id): - return _scheduled_operation_log_get(context, log_id) - - -def _scheduled_operation_log_get(context, log_id, session=None): - result = model_query(context, models.ScheduledOperationLog, - session=session).filter_by(id=log_id).first() - - if not result: - raise exception.ScheduledOperationLogNotFound(log_id=log_id) - - return result - - -def scheduled_operation_log_create(context, values): - log_ref = models.ScheduledOperationLog() - log_ref.update(values) - log_ref.save(get_session()) - return log_ref - - -@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -def scheduled_operation_log_update(context, log_id, values): - """Update the ScheduledOperationLog record with the most recent data.""" - - session = get_session() - with session.begin(): - log_ref = _scheduled_operation_log_get(context, log_id, - session=session) - log_ref.update(values) - log_ref.save(session) - return log_ref - - -def scheduled_operation_log_delete(context, log_id): - """Delete a ScheduledOperationLog record.""" - - session = get_session() - with session.begin(): - log_ref = _scheduled_operation_log_get(context, log_id, - session=session) - session.delete(log_ref) - session.flush() - - -def scheduled_operation_log_delete_oldest(context, operation_id, - retained_num, excepted_states): - table = models.ScheduledOperationLog - session = get_session() - with session.begin(): - result = model_query(context, table, session=session).filter_by( - operation_id=operation_id).order_by( - expression.desc(table.created_at)).limit(retained_num).all() - - if not result or len(result) < retained_num: - return - oldest_create_time = result[-1]['created_at'] - - if excepted_states and isinstance(excepted_states, list): - filters = expression.and_( - table.operation_id == operation_id, - table.created_at < oldest_create_time, - table.state.notin_(excepted_states)) - else: - filters = expression.and_( - table.operation_id == 
operation_id, - table.created_at < oldest_create_time) - - model_query(context, table, session=session).filter( - filters).delete(synchronize_session=False) - - -def _scheduled_operation_log_list_query(context, session, **kwargs): - query = model_query(context, models.ScheduledOperationLog, - session=session) - return query - - -def _scheduled_operation_log_list_process_filters(query, filters): - exact_match_filter_names = ['operation_id', 'state'] - query = _list_common_process_exact_filter( - models.ScheduledOperationLog, query, filters, - exact_match_filter_names) - - return query - - -def scheduled_operation_log_get_all_by_filters_sort( - context, filters, limit=None, marker=None, - sort_keys=None, sort_dirs=None): - - session = get_session() - with session.begin(): - query = _generate_paginate_query( - context, session, marker, limit, - sort_keys, sort_dirs, filters, - paginate_type=models.ScheduledOperationLog, - use_model=True) - - return query.all() if query else [] - - -################### - - -def _resource_refs(resource_list, meta_class): - resource_refs = [] - if resource_list: - for resource in resource_list: - resource_ref = meta_class() - resource_ref['resource_id'] = resource['id'] - resource_ref['resource_type'] = resource['type'] - resource_ref['resource_name'] = resource['name'] - resource_ref['resource_extra_info'] = resource.get( - 'extra_info', None) - resource_refs.append(resource_ref) - return resource_refs - - -@require_context -def _plan_get_query(context, session=None, project_only=False, - joined_load=True): - """Get the query to retrieve the plan. - - :param context: the context used to run the method _plan_get_query - :param session: the session to use - :param project_only: the boolean used to decide whether to query the - plan in the current project or all projects - :param joined_load: the boolean used to decide whether the query loads - the other models, which join the plan model in - the database. 
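The ``scheduled_operation_log_delete_oldest`` logic above keeps the newest
``retained_num`` log rows for an operation and purges anything older, except
rows whose state appears in ``excepted_states``. A minimal pure-Python sketch
of the same retention rule, where ``logs`` is a hypothetical list of dicts
carrying ``created_at`` and ``state`` keys::

    def rows_to_purge(logs, retained_num, excepted_states=None):
        """Return the entries the DELETE above would remove."""
        # Newest first, mirroring ORDER BY created_at DESC LIMIT retained_num.
        ordered = sorted(logs, key=lambda log: log['created_at'], reverse=True)
        if len(ordered) < retained_num:
            return []  # fewer rows than the retention window; keep everything
        # created_at of the oldest row still inside the retention window.
        cutoff = ordered[retained_num - 1]['created_at']
        victims = [log for log in ordered if log['created_at'] < cutoff]
        if excepted_states and isinstance(excepted_states, list):
            victims = [log for log in victims
                       if log['state'] not in excepted_states]
        return victims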
- :returns: updated query or None - """ - query = model_query(context, models.Plan, session=session, - project_only=project_only) - if joined_load: - query = query.options(joinedload('resources')) - return query - - -def _plan_resources_get_query(context, plan_id, model, session=None): - return model_query( - context, - model, - session=session, - read_deleted="no" - ).filter_by(plan_id=plan_id) - - -@require_context -def _resource_create(context, values): - resource_ref = models.Resource() - resource_ref.update(values) - session = get_session() - with session.begin(): - resource_ref.save(session) - return resource_ref - - -@require_context -def _plan_resources_update(context, plan_id, resources, session=None): - session = session or get_session() - now = timeutils.utcnow() - with session.begin(): - model_query( - context, - models.Resource, - session=session - ).filter_by( - plan_id=plan_id - ).update({ - 'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at') - }) - resources_list = [] - for resource in resources: - resource['plan_id'] = plan_id - resource['resource_id'] = resource.pop('id') - resource['resource_type'] = resource.pop('type') - resource['resource_name'] = resource.pop('name') - resource['resource_extra_info'] = resource.pop( - 'extra_info', None) - resource_ref = _resource_create(context, resource) - resources_list.append(resource_ref) - - return resources_list - - -@require_context -def _plan_get(context, plan_id, session=None, joined_load=True): - result = _plan_get_query(context, session=session, project_only=True, - joined_load=joined_load) - result = result.filter_by(id=plan_id).first() - - if not result: - raise exception.PlanNotFound(plan_id=plan_id) - - return result - - -@require_context -def plan_create(context, values): - values['resources'] = _resource_refs(values.get('resources'), - models.Resource) - - plan_ref = models.Plan() - if not values.get('id'): - values['id'] = uuidutils.generate_uuid() - plan_ref.update(values) - - session = get_session() - with session.begin(): - session.add(plan_ref) - - return _plan_get(context, values['id'], session=session) - - -@require_context -def plan_get(context, plan_id): - return _plan_get(context, plan_id) - - -@require_admin_context -@_retry_on_deadlock -def plan_destroy(context, plan_id): - session = get_session() - now = timeutils.utcnow() - with session.begin(): - model_query( - context, - models.Plan, - session=session - ).filter_by( - id=plan_id - ).update({ - 'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at') - }) - model_query( - context, - models.Resource, - session=session - ).filter_by( - plan_id=plan_id - ).update({ - 'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at') - }) - - -@require_context -@require_plan_exists -def plan_update(context, plan_id, values): - session = get_session() - with session.begin(): - resources = values.get('resources') - if resources is not None: - _plan_resources_update(context, - plan_id, - values.pop('resources'), - session=session) - - plan_ref = _plan_get(context, plan_id, session=session) - plan_ref.update(values) - - return plan_ref - - -@require_context -@require_plan_exists -@_retry_on_deadlock -def plan_resources_update(context, plan_id, resources): - return _plan_resources_update(context, - plan_id, - resources) - - -@require_admin_context -def plan_get_all(context, marker, limit, sort_keys=None, sort_dirs=None, - filters=None, offset=None): - """Retrieves all plans. 
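``plan_destroy`` above soft-deletes with a bulk UPDATE rather than removing
rows, and passes ``literal_column('updated_at')`` so the statement writes the
column back to itself (``SET updated_at = updated_at``) instead of letting an
``onupdate`` default bump the timestamp. The idiom in isolation, as a sketch
in which ``plan_model`` and the oslo.utils import are assumptions::

    from oslo_utils import timeutils
    from sqlalchemy import literal_column

    def soft_delete_plan(session, plan_model, plan_id):
        # Bulk UPDATE: no ORM objects are loaded, so disable the in-Python
        # session synchronization, which cannot evaluate literal_column.
        session.query(plan_model).filter_by(id=plan_id).update({
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            # Leave updated_at exactly as it was.
            'updated_at': literal_column('updated_at'),
        }, synchronize_session=False)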
- - If no sort parameters are specified then the returned plans are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. - - :param context: context to query under - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_plan_filters - function for more information - :returns: list of matching plans - """ - session = get_session() - with session.begin(): - # Generate the query - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, offset) - # No plans would match, return empty list - if query is None: - return [] - return query.all() - - -@require_context -def plan_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, filters=None, - offset=None): - """Retrieves all plans in a project. - - If no sort parameters are specified then the returned plans are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. - - :param context: context to query under - :param project_id: project for all plans being retrieved - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_plan_filters - function for more information - :returns: list of matching plans - """ - session = get_session() - with session.begin(): - authorize_project_context(context, project_id) - # Add in the project filter without modifying the given filters - filters = filters.copy() if filters else {} - filters['project_id'] = project_id - # Generate the query - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, offset) - # No plans would match, return empty list - if query is None: - return [] - return query.all() - - -def _process_plan_filters(query, filters): - exact_match_filter_names = ['project_id', 'status'] - query = _list_common_process_exact_filter(models.Plan, query, filters, - exact_match_filter_names) - - regex_match_filter_names = ['name', 'description'] - query = _list_common_process_regex_filter(models.Plan, query, filters, - regex_match_filter_names) - - return query - - -############################### - - -@require_context -def restore_create(context, values): - restore_ref = models.Restore() - if not values.get('id'): - values['id'] = uuidutils.generate_uuid() - restore_ref.update(values) - - session = get_session() - with session.begin(): - restore_ref.save(session) - return restore_ref - - -@require_context -def restore_get(context, restore_id): - return _restore_get(context, restore_id) - - -@require_context -def 
_restore_get(context, restore_id, session=None): - result = model_query( - context, - models.Restore, - session=session - ).filter_by( - id=restore_id - ).first() - if not result: - raise exception.RestoreNotFound(restore_id=restore_id) - - return result - - -@require_context -def restore_update(context, restore_id, values): - session = get_session() - with session.begin(): - restore_ref = _restore_get(context, restore_id, session=session) - restore_ref.update(values) - return restore_ref - - -@require_context -@_retry_on_deadlock -def restore_destroy(context, restore_id): - session = get_session() - with session.begin(): - restore_ref = _restore_get(context, restore_id, session=session) - restore_ref.delete(session=session) - - -def is_valid_model_filters(model, filters): - """Return True if filter values exist on the model - - :param model: a karbor model - :param filters: dictionary of filters - """ - for key in filters.keys(): - try: - getattr(model, key) - except AttributeError: - LOG.debug("'%s' filter key is not valid.", key) - return False - return True - - -def _restore_get_query(context, session=None, project_only=False): - return model_query(context, models.Restore, session=session, - project_only=project_only) - - -@require_admin_context -def restore_get_all(context, marker, limit, sort_keys=None, sort_dirs=None, - filters=None, offset=None): - """Retrieves all restores. - - If no sort parameters are specified then the returned plans are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. - - :param context: context to query under - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_plan_filters - function for more information - :returns: list of matching restores - """ - if filters and not is_valid_model_filters(models.Restore, filters): - return [] - - session = get_session() - with session.begin(): - # Generate the query - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, - offset, models.Restore) - # No restores would match, return empty list - if query is None: - return [] - return query.all() - - -@require_context -def restore_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, filters=None, - offset=None): - """Retrieves all restores in a project. - - If no sort parameters are specified then the returned plans are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. 
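``is_valid_model_filters`` is a cheap guard: list calls return an empty result
instead of raising when a caller passes a filter key the model does not have.
A quick illustration against a stand-in model (``FakeRestore`` is made up)::

    class FakeRestore(object):
        id = None
        status = None

    # Both keys exist on the model, so filtering may proceed.
    assert is_valid_model_filters(FakeRestore, {'status': 'error'})
    # Unknown key: the *_get_all callers above short-circuit to [].
    assert not is_valid_model_filters(FakeRestore, {'bogus': 'x'})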
- - :param context: context to query under - :param project_id: project for all plans being retrieved - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_plan_filters - function for more information - :returns: list of matching restores - """ - if filters and not is_valid_model_filters(models.Restore, filters): - return [] - - session = get_session() - with session.begin(): - authorize_project_context(context, project_id) - # Add in the project filter without modifying the given filters - filters = filters.copy() if filters else {} - filters['project_id'] = project_id - # Generate the query - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, - offset, models.Restore) - # No plans would match, return empty list - if query is None: - return [] - return query.all() - - -def _process_restore_filters(query, filters): - if filters: - # Ensure that filters' keys exist on the model - if not is_valid_model_filters(models.Restore, filters): - return None - query = query.filter_by(**filters) - return query - - -############################### - - -@require_context -def operation_log_create(context, values): - operation_log_ref = models.OperationLog() - if not values.get('id'): - values['id'] = uuidutils.generate_uuid() - operation_log_ref.update(values) - - session = get_session() - with session.begin(): - operation_log_ref.save(session) - return operation_log_ref - - -@require_context -def operation_log_get(context, operation_log_id): - return _operation_log_get(context, operation_log_id) - - -@require_context -def _operation_log_get(context, operation_log_id, session=None): - result = model_query( - context, - models.OperationLog, - session=session - ).filter_by( - id=operation_log_id - ).first() - if not result: - raise exception.OperationLogNotFound(operation_log_id=operation_log_id) - - return result - - -@require_context -def operation_log_update(context, operation_log_id, values): - session = get_session() - with session.begin(): - operation_log_ref = _operation_log_get(context, operation_log_id, - session=session) - operation_log_ref.update(values) - return operation_log_ref - - -@require_context -@_retry_on_deadlock -def operation_log_destroy(context, operation_log_id): - session = get_session() - with session.begin(): - operation_log_ref = _operation_log_get(context, operation_log_id, - session=session) - operation_log_ref.delete(session=session) - - -def _operation_log_get_query(context, session=None, project_only=False): - return model_query(context, models.OperationLog, session=session, - project_only=project_only) - - -@require_admin_context -def operation_log_get_all(context, marker, limit, sort_keys=None, - sort_dirs=None, - filters=None, offset=None): - """Retrieves all operation logs. - - If no sort parameters are specified then the returned plans are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. 
- - :param context: context to query under - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_plan_filters - function for more information - :returns: list of matching operation logs - """ - if filters and not is_valid_model_filters(models.OperationLog, filters): - return [] - - session = get_session() - with session.begin(): - # Generate the query - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, - offset, models.OperationLog) - # No restores would match, return empty list - if query is None: - return [] - return query.all() - - -@require_context -def operation_log_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - offset=None): - """Retrieves all operation logs in a project. - - If no sort parameters are specified then the returned plans are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. - - :param context: context to query under - :param project_id: project for all plans being retrieved - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_plan_filters - function for more information - :returns: list of matching restores - """ - if filters and not is_valid_model_filters(models.OperationLog, filters): - return [] - - session = get_session() - with session.begin(): - authorize_project_context(context, project_id) - # Add in the project filter without modifying the given filters - filters = filters.copy() if filters else {} - filters['project_id'] = project_id - # Generate the query - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, - offset, models.OperationLog) - # No plans would match, return empty list - if query is None: - return [] - return query.all() - - -def _process_operation_log_filters(query, filters): - if filters: - # Ensure that filters' keys exist on the model - if not is_valid_model_filters(models.OperationLog, filters): - return None - query = query.filter_by(**filters) - return query -############################### - - -@require_context -def verification_create(context, values): - verification_ref = models.Verification() - if not values.get('id'): - values['id'] = uuidutils.generate_uuid() - verification_ref.update(values) - - session = get_session() - with session.begin(): - verification_ref.save(session) - return verification_ref - - -@require_context -def verification_get(context, verification_id): - return _verification_get(context, verification_id) - - 
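The per-project list calls all follow one shape: authorize the project, copy
the caller's filters, force ``project_id`` into the copy, then hand off to the
shared paginate-query builder. A hedged usage sketch, where ``ctxt`` is
assumed to be a request context produced elsewhere in karbor and ``'failed'``
is an illustrative status value::

    def list_failed_logs(ctxt, project_id):
        filters = {'status': 'failed'}
        # The dict is copied inside the call, so the caller's filters
        # are never mutated by the injected project_id.
        return operation_log_get_all_by_project(
            ctxt, project_id, marker=None, limit=50,
            sort_keys=['created_at'], sort_dirs=['desc'],
            filters=filters)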
-@require_context -def _verification_get(context, verification_id, session=None): - result = model_query( - context, - models.Verification, - session=session - ).filter_by( - id=verification_id - ).first() - if not result: - raise exception.VerificationNotFound( - verification_id=verification_id) - - return result - - -@require_context -def verification_update(context, verification_id, values): - session = get_session() - with session.begin(): - verification_ref = _verification_get( - context, verification_id, session=session) - verification_ref.update(values) - return verification_ref - - -@require_context -@_retry_on_deadlock -def verification_destroy(context, verification_id): - session = get_session() - with session.begin(): - verification_ref = _verification_get(context, - verification_id, - session=session) - verification_ref.delete(session=session) - - -def _verification_get_query(context, session=None, project_only=False): - return model_query(context, models.Verification, session=session, - project_only=project_only) - - -@require_admin_context -def verification_get_all(context, marker, limit, sort_keys=None, - sort_dirs=None, filters=None, offset=None): - """Retrieves all verifications. - - If no sort parameters are specified then the returned plans are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. - - :param context: context to query under - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_verification_filters - function for more information - :param offset: number of items to skip - :returns: list of matching verifications - """ - if filters and not is_valid_model_filters(models.Verification, filters): - return [] - - session = get_session() - with session.begin(): - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, - offset, models.Verification) - if query is None: - return [] - return query.all() - - -@require_context -def verification_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, offset=None): - """Retrieves all verifications in a project. - - If no sort parameters are specified then the returned plans are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. 
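Note the decorator split: ``verification_get_all`` above is admin-only and
unscoped, while tenants go through ``verification_get_all_by_project``, which
authorizes the project before injecting the filter. A sketch of the two entry
points, with ``admin_ctxt`` and ``user_ctxt`` as assumed context objects::

    # Admin listing: sees every project's verifications.
    verification_get_all(admin_ctxt, marker=None, limit=20)

    # Tenant listing: authorize_project_context() runs first, then
    # project_id is forced into the filters before pagination.
    verification_get_all_by_project(
        user_ctxt, user_ctxt.project_id, marker=None, limit=20)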
- - :param context: context to query under - :param project_id: project for all verifications being retrieved - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_verification_filters - function for more information - :param offset: number of items to skip - :returns: list of matching verifications - """ - if filters and not is_valid_model_filters(models.Verification, filters): - return [] - - session = get_session() - with session.begin(): - authorize_project_context(context, project_id) - filters = filters.copy() if filters else {} - filters['project_id'] = project_id - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, - offset, models.Verification) - if query is None: - return [] - return query.all() - - -def _process_verification_filters(query, filters): - if filters: - if not is_valid_model_filters(models.Verification, filters): - return None - query = query.filter_by(**filters) - return query -############################### - - -@require_context -def checkpoint_record_create(context, values): - checkpoint_record_ref = models.CheckpointRecord() - if not values.get('id'): - values['id'] = uuidutils.generate_uuid() - checkpoint_record_ref.update(values) - - session = get_session() - with session.begin(): - checkpoint_record_ref.save(session) - return checkpoint_record_ref - - -@require_context -def checkpoint_record_get(context, checkpoint_record_id): - return _checkpoint_record_get(context, checkpoint_record_id) - - -@require_context -def _checkpoint_record_get(context, checkpoint_record_id, session=None): - result = model_query( - context, - models.CheckpointRecord, - session=session).filter_by( - id=checkpoint_record_id).first() - if not result: - raise exception.CheckpointRecordNotFound(id=checkpoint_record_id) - - return result - - -@require_context -def checkpoint_record_update(context, checkpoint_record_id, values): - session = get_session() - with session.begin(): - checkpoint_record_ref = _checkpoint_record_get(context, - checkpoint_record_id, - session=session) - checkpoint_record_ref.update(values) - return checkpoint_record_ref - - -@require_context -@_retry_on_deadlock -def checkpoint_record_destroy(context, checkpoint_record_id): - session = get_session() - with session.begin(): - checkpoint_record_ref = _checkpoint_record_get(context, - checkpoint_record_id, - session=session) - checkpoint_record_ref.delete(session=session) - - -def _checkpoint_record_list_query(context, session, **kwargs): - return model_query(context, models.CheckpointRecord, session=session) - - -def _checkpoint_record_list_process_filters(query, filters): - exact_match_filter_names = ['project_id', 'id', - 'checkpoint_id', 'checkpoint_status', - 'plan_id', 'provider_id', 'operation_id'] - query = _list_common_process_exact_filter( - models.CheckpointRecord, query, filters, - exact_match_filter_names) - - regex_match_filter_names = ['create_by'] - query = _list_common_process_regex_filter( - models.CheckpointRecord, query, filters, - 
regex_match_filter_names) - - return query - - -def checkpoint_record_get_all_by_filters_sort( - context, filters, limit=None, marker=None, - sort_keys=None, sort_dirs=None): - - session = get_session() - with session.begin(): - query = _generate_paginate_query( - context, session, marker, limit, - sort_keys, sort_dirs, filters, - paginate_type=models.CheckpointRecord, - use_model=True) - - return query.all() if query else [] -############################### - - -@require_context -def _list_common_get_query(context, model, session=None): - return model_query(context, model, session=session) - - -def _list_common_process_exact_filter(model, query, filters, legal_keys): - """Applies exact match filtering to a query. - - :param model: model to apply filters to - :param query: query to apply filters to - :param filters: dictionary of filters; values that are lists, - tuples, sets, or frozensets cause an 'IN' test to - be performed, while exact matching ('==' operator) - is used for other values - :param legal_keys: list of keys to apply exact filtering to - :returns: the updated query. - """ - - filter_dict = {} - for key in legal_keys: - if key not in filters: - continue - - value = filters.get(key) - if isinstance(value, (list, tuple, set, frozenset)): - if not value: - return None # empty IN-predicate; short circuit - # Looking for values in a list; apply to query directly - column_attr = getattr(model, key) - query = query.filter(column_attr.in_(value)) - else: - # OK, simple exact match; save for later - filter_dict[key] = value - - # Apply simple exact matches - if filter_dict: - query = query.filter_by(**filter_dict) - - return query - - -def _list_common_process_regex_filter(model, query, filters, legal_keys): - """Applies regular expression filtering to a query. - - :param model: model to apply filters to - :param query: query to apply filters to - :param filters: dictionary of filters with regex values - :param legal_keys: list of keys to apply regex filtering to - :returns: the updated query. 
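``_list_common_process_exact_filter`` above turns list/tuple/set/frozenset
values into SQL ``IN`` predicates, uses plain equality for scalars, and
returns ``None`` for an empty collection so callers can skip the query
entirely. For example, with ``status`` as the legal key and ``query`` standing
for any query over ``models.Plan``::

    query = _list_common_process_exact_filter(
        models.Plan, query, {'status': ['available', 'error']}, ['status'])
    # -> query.filter(Plan.status.in_(['available', 'error']))

    query = _list_common_process_exact_filter(
        models.Plan, query, {'status': 'available'}, ['status'])
    # -> query.filter_by(status='available')

    query = _list_common_process_exact_filter(
        models.Plan, query, {'status': []}, ['status'])
    # -> None: an empty IN-predicate can never match anything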
- """ - - def _get_regexp_op_for_connection(db_connection): - db_string = db_connection.split(':')[0].split('+')[0] - regexp_op_map = { - 'postgresql': '~', - 'mysql': 'REGEXP', - 'sqlite': 'REGEXP' - } - return regexp_op_map.get(db_string, 'LIKE') - - db_regexp_op = _get_regexp_op_for_connection(CONF.database.connection) - for key in legal_keys: - if key not in filters: - continue - - value = filters[key] - if not isinstance(value, six.string_types): - continue - - column_attr = getattr(model, key) - if db_regexp_op == 'LIKE': - query = query.filter(column_attr.op(db_regexp_op)( - u'%' + value + u'%')) - else: - query = query.filter(column_attr.op(db_regexp_op)( - value)) - return query - - -PAGINATION_HELPERS = { - models.Plan: (_plan_get_query, _process_plan_filters, _plan_get), - models.Restore: (_restore_get_query, _process_restore_filters, - _restore_get), - models.Verification: ( - _verification_get_query, - _process_verification_filters, - _verification_get), - models.Trigger: (_trigger_list_query, _trigger_list_process_filters, - _trigger_get), - models.TriggerExecution: (_trigger_execution_list_query, - _trigger_execution_list_process_filters, - _trigger_execution_get), - models.ScheduledOperation: (_scheduled_operation_list_query, - _scheduled_operation_list_process_filters, - _scheduled_operation_get), - - models.ScheduledOperationState: ( - _scheduled_operation_state_list_query, - _scheduled_operation_state_list_process_filters, - _scheduled_operation_state_get), - - models.OperationLog: (_operation_log_get_query, - _process_operation_log_filters, - _operation_log_get), - - models.ScheduledOperationLog: ( - _scheduled_operation_log_list_query, - _scheduled_operation_log_list_process_filters, - _scheduled_operation_log_get), - models.CheckpointRecord: ( - _checkpoint_record_list_query, - _checkpoint_record_list_process_filters, - _checkpoint_record_get), -} - - -############################### - - -def _generate_paginate_query(context, session, marker, limit, sort_keys, - sort_dirs, filters, offset=None, - paginate_type=models.Plan, use_model=False, - **kwargs): - """Generate the query to include the filters and the paginate options. - - Returns a query with sorting / pagination criteria added or None - if the given filters will not yield any results. - - :param context: context to query under - :param session: the session to use - :param marker: the last item of the previous page; we returns the next - results after this value. 
- :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_plan_filters - function for more information - :param offset: number of items to skip - :param paginate_type: type of pagination to generate - :returns: updated query or None - """ - get_query, process_filters, get = PAGINATION_HELPERS[paginate_type] - - sort_keys, sort_dirs = process_sort_params(sort_keys, - sort_dirs, - default_dir='desc') - if use_model: - query = get_query(context, session=session, **kwargs) - else: - query = get_query(context, session=session) - - if filters: - query = process_filters(query, filters) - if query is None: - return None - - marker_object = None - if marker is not None: - marker_object = get(context, marker, session=session) - - query = sqlalchemyutils.paginate_query(query, paginate_type, limit, - sort_keys, - marker=marker_object, - sort_dirs=sort_dirs) - if offset: - query = query.offset(offset) - return query - - -def process_sort_params(sort_keys, sort_dirs, default_keys=None, - default_dir='asc'): - """Process the sort parameters to include default keys. - - Creates a list of sort keys and a list of sort directions. Adds the default - keys to the end of the list if they are not already included. - - When adding the default keys to the sort keys list, the associated - direction is: - 1) The first element in the 'sort_dirs' list (if specified), else - 2) 'default_dir' value (Note that 'asc' is the default value since this is - the default in sqlalchemy.utils.paginate_query) - - :param sort_keys: List of sort keys to include in the processed list - :param sort_dirs: List of sort directions to include in the processed list - :param default_keys: List of sort keys that need to be included in the - processed list, they are added at the end of the list - if not already specified. - :param default_dir: Sort direction associated with each of the default - keys that are not supplied, used when they are added - to the processed list - :returns: list of sort keys, list of sort directions - :raise exception.InvalidInput: If more sort directions than sort keys - are specified or if an invalid sort - direction is specified - """ - if default_keys is None: - default_keys = ['created_at', 'id'] - - # Determine direction to use for when adding default keys - if sort_dirs and len(sort_dirs): - default_dir_value = sort_dirs[0] - else: - default_dir_value = default_dir - - # Create list of keys (do not modify the input list) - if sort_keys: - result_keys = list(sort_keys) - else: - result_keys = [] - - # If a list of directions is not provided, use the default sort direction - # for all provided keys. 
- if sort_dirs: - result_dirs = [] - # Verify sort direction - for sort_dir in sort_dirs: - if sort_dir not in ('asc', 'desc'): - msg = _("Unknown sort direction, must be 'desc' or 'asc'.") - raise exception.InvalidInput(reason=msg) - result_dirs.append(sort_dir) - else: - result_dirs = [default_dir_value for _sort_key in result_keys] - - # Ensure that the key and direction length match - while len(result_dirs) < len(result_keys): - result_dirs.append(default_dir_value) - # Unless more direction are specified, which is an error - if len(result_dirs) > len(result_keys): - msg = _("Sort direction array size exceeds sort key array size.") - raise exception.InvalidInput(reason=msg) - - # Ensure defaults are included - for key in default_keys: - if key not in result_keys: - result_keys.append(key) - result_dirs.append(default_dir_value) - - return result_keys, result_dirs - - -@require_admin_context -def purge_deleted_rows(context, age_in_days): - """Purge deleted rows older than age from karbor tables.""" - try: - age_in_days = int(age_in_days) - except ValueError: - msg = _('Invalid valude for age, %(age)s') - LOG.exception(msg, {'age': age_in_days}) - raise exception.InvalidParameterValue(msg % {'age': age_in_days}) - if age_in_days <= 0: - msg = _('Must supply a positive value for age') - LOG.exception(msg) - raise exception.InvalidParameterValue(msg) - - engine = get_engine() - session = get_session() - metadata = MetaData() - metadata.bind = engine - tables = [] - - for model_class in models.__dict__.values(): - if hasattr(model_class, "__tablename__") and hasattr( - model_class, "deleted"): - tables.append(model_class.__tablename__) - - # Reorder the list so the tables are last to avoid ForeignKey constraints - # get rid of FK constraints - for tbl in ('plans', 'scheduled_operations'): - try: - tables.remove(tbl) - except ValueError: - LOG.warning('Expected table %(tbl)s was not found in DB.', - **locals()) - else: - tables.append(tbl) - - for table in tables: - t = Table(table, metadata, autoload=True) - LOG.info('Purging deleted rows older than age=%(age)d days from ' - 'table=%(table)s', {'age': age_in_days, 'table': table}) - deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days) - try: - with session.begin(): - result = session.execute( - t.delete() - .where(t.c.deleted_at < deleted_age)) - except db_exc.DBReferenceError: - LOG.exception('DBError detected when purging from ' - 'table=%(table)s', {'table': table}) - raise - - rows_purged = result.rowcount - LOG.info("Deleted %(row)d rows from table=%(table)s", - {'row': rows_purged, 'table': table}) - - -################### - - -@require_context -def quota_get(context, project_id, resource, session=None): - result = model_query(context, models.Quota, session=session, - read_deleted="no").\ - filter_by(project_id=project_id).\ - filter_by(resource=resource).\ - first() - - if not result: - raise exception.ProjectQuotaNotFound(project_id=project_id) - - return result - - -@require_context -def quota_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) - - rows = model_query(context, models.Quota, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - result = {'project_id': project_id} - for row in rows: - result[row.resource] = row.hard_limit - - return result - - -@require_admin_context -def quota_create(context, project_id, resource, limit): - quota_ref = models.Quota() - quota_ref.project_id = project_id - quota_ref.resource = resource - quota_ref.hard_limit = limit - 
session = get_session() - with session.begin(): - quota_ref.save(session) - return quota_ref - - -@require_admin_context -def quota_update(context, project_id, resource, limit): - session = get_session() - with session.begin(): - quota_ref = quota_get(context, project_id, resource, session=session) - quota_ref.hard_limit = limit - quota_ref.save(session=session) - - -@require_admin_context -def quota_destroy(context, project_id, resource): - session = get_session() - with session.begin(): - quota_ref = quota_get(context, project_id, resource, session=session) - quota_ref.delete(session=session) - - -################### - - -@require_context -def quota_class_get(context, class_name, resource, session=None): - result = model_query(context, models.QuotaClass, session=session, - read_deleted="no").\ - filter_by(class_name=class_name).\ - filter_by(resource=resource).\ - first() - - if not result: - raise exception.QuotaClassNotFound(class_name=class_name) - - return result - - -@require_context -def quota_class_get_all_by_name(context, class_name): - authorize_quota_class_context(context, class_name) - - rows = model_query(context, models.QuotaClass, read_deleted="no").\ - filter_by(class_name=class_name).\ - all() - - result = {'class_name': class_name} - for row in rows: - result[row.resource] = row.hard_limit - - return result - - -def authorize_quota_class_context(context, class_name): - """Ensures a request has permission to access the given quota class.""" - if is_user_context(context): - if not context.quota_class: - raise exception.NotAuthorized() - elif context.quota_class != class_name: - raise exception.NotAuthorized() - - -@require_admin_context -def quota_class_create(context, class_name, resource, limit): - quota_class_ref = models.QuotaClass() - quota_class_ref.class_name = class_name - quota_class_ref.resource = resource - quota_class_ref.hard_limit = limit - session = get_session() - with session.begin(): - quota_class_ref.save(session) - return quota_class_ref - - -@require_admin_context -def quota_class_update(context, class_name, resource, limit): - session = get_session() - with session.begin(): - quota_class_ref = quota_class_get(context, class_name, resource, - session=session) - quota_class_ref.hard_limit = limit - quota_class_ref.save(session=session) - - -@require_admin_context -def quota_class_destroy(context, class_name, resource): - session = get_session() - with session.begin(): - quota_class_ref = quota_class_get(context, class_name, resource, - session=session) - quota_class_ref.delete(session=session) - - -@require_admin_context -def quota_class_destroy_all_by_name(context, class_name): - session = get_session() - with session.begin(): - quota_classes = model_query(context, models.QuotaClass, - session=session, read_deleted="no").\ - filter_by(class_name=class_name).\ - all() - - for quota_class_ref in quota_classes: - quota_class_ref.delete(session=session) - - -################### - - -@require_context -def quota_usage_get(context, project_id, resource, session=None): - result = model_query(context, models.QuotaUsage, session=session, - read_deleted="no").\ - filter_by(project_id=project_id).\ - filter_by(resource=resource).\ - first() - - if not result: - raise exception.QuotaUsageNotFound(project_id=project_id) - - return result - - -@require_context -def quota_usage_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) - - rows = model_query(context, models.QuotaUsage, read_deleted="no").\ - 
filter_by(project_id=project_id).\ - all() - - result = {'project_id': project_id} - for row in rows: - result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) - - return result - - -@require_admin_context -def quota_usage_create(context, project_id, resource, in_use, reserved, - until_refresh, session=None): - quota_usage_ref = models.QuotaUsage() - quota_usage_ref.project_id = project_id - quota_usage_ref.resource = resource - quota_usage_ref.in_use = in_use - quota_usage_ref.reserved = reserved - quota_usage_ref.until_refresh = until_refresh - if not session: - session = get_session() - with session.begin(): - quota_usage_ref.save(session=session) - else: - quota_usage_ref.save(session=session) - - return quota_usage_ref - - -################### - - -@require_context -def reservation_get(context, uuid, session=None): - result = model_query(context, models.Reservation, session=session, - read_deleted="no").\ - filter_by(uuid=uuid).first() - - if not result: - raise exception.ReservationNotFound(uuid=uuid) - - return result - - -@require_context -def reservation_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) - - rows = model_query(context, models.Reservation, read_deleted="no").\ - filter_by(project_id=project_id).all() - - result = {'project_id': project_id} - for row in rows: - result.setdefault(row.resource, {}) - result[row.resource][row.uuid] = row.delta - - return result - - -@require_admin_context -def reservation_create(context, uuid, usage, project_id, resource, delta, - expire, session=None): - reservation_ref = models.Reservation() - reservation_ref.uuid = uuid - reservation_ref.usage_id = usage['id'] - reservation_ref.project_id = project_id - reservation_ref.resource = resource - reservation_ref.delta = delta - reservation_ref.expire = expire - if not session: - session = get_session() - with session.begin(): - reservation_ref.save(session=session) - else: - reservation_ref.save(session=session) - return reservation_ref - - -@require_admin_context -def reservation_destroy(context, uuid): - session = get_session() - with session.begin(): - reservation_ref = reservation_get(context, uuid, session=session) - reservation_ref.delete(session=session) - - -################### - - -# NOTE(johannes): The quota code uses SQL locking to ensure races don't -# cause under or over counting of resources. To avoid deadlocks, this -# code always acquires the lock on quota_usages before acquiring the lock -# on reservations. - -def _get_quota_usages(context, session, project_id): - # Broken out for testability - rows = model_query(context, models.QuotaUsage, - read_deleted="no", - session=session).\ - filter_by(project_id=project_id).\ - with_lockmode('update').\ - all() - return dict((row.resource, row) for row in rows) - - -@require_context -def quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age, project_id=None): - elevated = context.elevated() - session = get_session() - with session.begin(): - if project_id is None: - project_id = context.project_id - - # Get the current usages - usages = _get_quota_usages(context, session, project_id) - - # Handle usage refresh - work = set(deltas.keys()) - while work: - resource = work.pop() - - # Do we need to refresh the usage? 
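``_get_quota_usages`` takes ``SELECT ... FOR UPDATE`` row locks on the usage
rows, and per the deadlock note above it always does so before touching
reservation rows. ``with_lockmode('update')`` is the legacy SQLAlchemy
spelling; on modern SQLAlchemy the equivalent lock looks like this sketch,
where the explicit ``deleted`` filter stands in for model_query's
``read_deleted="no"``::

    def get_quota_usages(session, usage_model, project_id):
        rows = (session.query(usage_model)
                .filter_by(project_id=project_id, deleted=False)
                .with_for_update()   # SELECT ... FOR UPDATE
                .all())
        return {row.resource: row for row in rows}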
- refresh = False - if resource not in usages: - usages[resource] = quota_usage_create(elevated, - project_id, - resource, - 0, 0, - until_refresh or None, - session=session) - refresh = True - elif usages[resource].in_use < 0: - # Negative in_use count indicates a desync, so try to - # heal from that... - refresh = True - elif usages[resource].until_refresh is not None: - usages[resource].until_refresh -= 1 - if usages[resource].until_refresh <= 0: - refresh = True - elif max_age and (usages[resource].updated_at - - timeutils.utcnow()).seconds >= max_age: - refresh = True - - # OK, refresh the usage - if refresh: - # Grab the sync routine - sync = resources[resource].sync - updates = {} - if sync: - updates = sync(elevated, project_id, session) - for res, in_use in updates.items(): - # Make sure we have a destination for the usage! - if res not in usages: - usages[res] = quota_usage_create(elevated, - project_id, - res, - 0, 0, - until_refresh or None, - session=session) - - # Update the usage - usages[res].in_use = in_use - usages[res].until_refresh = until_refresh or None - - # Because more than one resource may be refreshed - # by the call to the sync routine, and we don't - # want to double-sync, we make sure all refreshed - # resources are dropped from the work set. - work.discard(res) - - # NOTE(Vek): We make the assumption that the sync - # routine actually refreshes the - # resources that it is the sync routine - # for. We don't check, because this is - # a best-effort mechanism. - - # Check for deltas that would go negative - unders = [res for res, delta in deltas.items() - if delta < 0 and - delta + usages[res].in_use < 0] - - # Now, let's check the quotas - # NOTE(Vek): We're only concerned about positive increments. - # If a project has gone over quota, we want them to - # be able to reduce their usage without any - # problems. - overs = [res for res, delta in deltas.items() - if quotas[res] >= 0 and delta >= 0 and - quotas[res] < delta + usages[res].total] - - # NOTE(Vek): The quota check needs to be in the transaction, - # but the transaction doesn't fail just because - # we're over quota, so the OverQuota raise is - # outside the transaction. If we did the raise - # here, our usage updates would be discarded, but - # they're not invalidated by being over-quota. - - # Create the reservations - if not overs: - reservations = [] - for resource, delta in deltas.items(): - reservation = reservation_create(elevated, - str(uuid.uuid4()), - usages[resource], - project_id, - resource, delta, expire, - session=session) - reservations.append(reservation.uuid) - - # Also update the reserved quantity - # NOTE(Vek): Again, we are only concerned here about - # positive increments. Here, though, we're - # worried about the following scenario: - # - # 1) User initiates resize down. - # 2) User allocates a new instance. - # 3) Resize down fails or is reverted. - # 4) User is now over quota. - # - # To prevent this, we only update the - # reserved value if the delta is positive. 
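The ``overs`` computation only flags non-negative deltas, so a project that is
already over quota can still free resources. A worked example with made-up
numbers, where ``total`` expands the usage model's ``in_use + reserved``
property by hand::

    quotas = {'plans': 10}                       # hard limit
    usage = {'in_use': 8, 'reserved': 1}         # committed total = 9
    deltas = {'plans': 2}                        # asking for 2 more

    total = usage['in_use'] + usage['reserved']
    overs = [res for res, delta in deltas.items()
             if quotas[res] >= 0 and delta >= 0
             and quotas[res] < delta + total]
    assert overs == ['plans']                    # 9 + 2 > 10: rejected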
- if delta > 0: - usages[resource].reserved += delta - - # Apply updates to the usages table - for usage_ref in usages.values(): - usage_ref.save(session=session) - - if unders: - LOG.warning(_("Change will make usage less than 0 for the following " - "resources: %(unders)s") % unders) - if overs: - usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved'])) - for k, v in usages.items()) - raise exception.OverQuota(overs=sorted(overs), quotas=quotas, - usages=usages) - - return reservations - - -def _quota_reservations(session, context, reservations): - """Return the relevant reservations.""" - - # Get the listed reservations - return model_query(context, models.Reservation, - read_deleted="no", - session=session).\ - filter(models.Reservation.uuid.in_(reservations)).\ - with_lockmode('update').\ - all() - - -@require_context -def reservation_commit(context, reservations, project_id=None): - session = get_session() - with session.begin(): - usages = _get_quota_usages(context, session, project_id) - - for reservation in _quota_reservations(session, context, reservations): - usage = usages[reservation.resource] - if reservation.delta >= 0: - usage.reserved -= reservation.delta - usage.in_use += reservation.delta - - reservation.delete(session=session) - - for usage in usages.values(): - usage.save(session=session) - - -@require_context -def reservation_rollback(context, reservations, project_id=None): - session = get_session() - with session.begin(): - usages = _get_quota_usages(context, session, project_id) - - for reservation in _quota_reservations(session, context, reservations): - usage = usages[reservation.resource] - if reservation.delta >= 0: - usage.reserved -= reservation.delta - - reservation.delete(session=session) - - for usage in usages.values(): - usage.save(session=session) - - -@require_admin_context -def quota_destroy_all_by_project(context, project_id): - session = get_session() - with session.begin(): - quotas = model_query(context, models.Quota, session=session, - read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - for quota_ref in quotas: - quota_ref.delete(session=session) - - quota_usages = model_query(context, models.QuotaUsage, - session=session, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - for quota_usage_ref in quota_usages: - quota_usage_ref.delete(session=session) - - reservations = model_query(context, models.Reservation, - session=session, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - for reservation_ref in reservations: - reservation_ref.delete(session=session) - - -@require_admin_context -def reservation_expire(context): - session = get_session() - with session.begin(): - current_time = timeutils.utcnow() - results = model_query(context, models.Reservation, session=session, - read_deleted="no").\ - filter(models.Reservation.expire < current_time).\ - all() - - if results: - for reservation in results: - if reservation.delta >= 0: - reservation.usage.reserved -= reservation.delta - reservation.usage.save(session=session) - - reservation.delete(session=session) - - -################ diff --git a/karbor/db/sqlalchemy/migrate_repo/README b/karbor/db/sqlalchemy/migrate_repo/README deleted file mode 100644 index 6218f8ca..00000000 --- a/karbor/db/sqlalchemy/migrate_repo/README +++ /dev/null @@ -1,4 +0,0 @@ -This is a database migration repository. 
- -More information at -http://code.google.com/p/sqlalchemy-migrate/ diff --git a/karbor/db/sqlalchemy/migrate_repo/__init__.py b/karbor/db/sqlalchemy/migrate_repo/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/db/sqlalchemy/migrate_repo/manage.py b/karbor/db/sqlalchemy/migrate_repo/manage.py deleted file mode 100644 index d544b97f..00000000 --- a/karbor/db/sqlalchemy/migrate_repo/manage.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from karbor.db.sqlalchemy import migrate_repo - -from migrate.versioning.shell import main - - -if __name__ == '__main__': - main(debug='False', - repository=os.path.abspath(os.path.dirname(migrate_repo.__file__))) diff --git a/karbor/db/sqlalchemy/migrate_repo/migrate.cfg b/karbor/db/sqlalchemy/migrate_repo/migrate.cfg deleted file mode 100644 index 692c7a90..00000000 --- a/karbor/db/sqlalchemy/migrate_repo/migrate.cfg +++ /dev/null @@ -1,20 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id=karbor - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. -# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table=migrate_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. -# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] diff --git a/karbor/db/sqlalchemy/migrate_repo/versions/001_karbor_init.py b/karbor/db/sqlalchemy/migrate_repo/versions/001_karbor_init.py deleted file mode 100644 index 1501efa3..00000000 --- a/karbor/db/sqlalchemy/migrate_repo/versions/001_karbor_init.py +++ /dev/null @@ -1,261 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
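With sqlalchemy-migrate, each numbered script under ``versions/`` exposes
``upgrade(migrate_engine)`` (and optionally ``downgrade``), and the current
schema version is tracked in the ``migrate_version`` table named in
``migrate.cfg``. A hedged sketch of driving this repository programmatically,
with the filesystem path and database URL as assumptions::

    from migrate.versioning import api as versioning_api

    repo = '/opt/stack/karbor/karbor/db/sqlalchemy/migrate_repo'
    url = 'mysql+pymysql://karbor:password@localhost/karbor'

    versioning_api.version_control(url, repo)   # create migrate_version at 0
    versioning_api.upgrade(url, repo)           # run 001, 002, 003 in order
    print(versioning_api.db_version(url, repo))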
- -from sqlalchemy import Boolean, Column, DateTime, ForeignKey -from sqlalchemy import Integer, MetaData, String, Table, Text - - -def define_tables(meta): - - services = Table( - 'services', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True, nullable=False), - Column('host', String(length=255)), - Column('binary', String(length=255)), - Column('topic', String(length=255)), - Column('report_count', Integer, nullable=False), - Column('disabled', Boolean), - Column('disabled_reason', String(length=255)), - Column('modified_at', DateTime), - Column('rpc_current_version', String(36)), - Column('rpc_available_version', String(36)), - mysql_engine='InnoDB' - ) - - plans = Table( - 'plans', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', String(36), primary_key=True, nullable=False), - Column('name', String(length=255)), - Column('description', String(length=255)), - Column('provider_id', String(length=36)), - Column('project_id', String(length=255)), - Column('status', String(length=64)), - Column('parameters', Text), - mysql_engine='InnoDB' - ) - - resources = Table( - 'resources', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True, nullable=False), - Column('plan_id', String(length=36), ForeignKey('plans.id'), - nullable=False), - Column('resource_id', String(length=36)), - Column('resource_type', String(length=64)), - Column('resource_name', String(length=255)), - Column('resource_extra_info', Text), - mysql_engine='InnoDB' - ) - - restores = Table( - 'restores', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', String(36), primary_key=True, nullable=False), - Column('project_id', String(length=255)), - Column('provider_id', String(length=36)), - Column('checkpoint_id', String(length=36)), - Column('restore_target', String(length=255)), - Column('parameters', Text), - Column('status', String(length=64)), - Column('resources_status', Text), - Column('resources_reason', Text), - mysql_engine='InnoDB' - ) - - operation_logs = Table( - 'operation_logs', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean, nullable=False), - Column('id', String(length=36), primary_key=True, nullable=False), - Column('project_id', String(length=255), nullable=False), - Column('operation_type', String(length=255), nullable=False), - Column('checkpoint_id', String(length=36)), - Column('plan_id', String(length=36)), - Column('provider_id', String(length=36)), - Column('restore_id', String(length=36)), - Column('scheduled_operation_id', String(length=36)), - Column('status', String(length=64)), - Column('started_at', DateTime), - Column('ended_at', DateTime), - Column('error_info', Text), - Column('extra_info', Text), - mysql_engine='InnoDB' - ) - - triggers = Table( - 'triggers', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean, nullable=False), - Column('id', String(length=36), primary_key=True, nullable=False), - Column('name', String(length=255), nullable=False), - Column('project_id', 
String(length=255), nullable=False), - Column('type', String(length=64), nullable=False), - Column('properties', Text, nullable=False), - mysql_engine='InnoDB' - ) - - trigger_executions = Table( - 'trigger_executions', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean, nullable=False), - Column('id', String(length=36), primary_key=True, nullable=False), - Column('trigger_id', String(length=36), unique=True, nullable=False, - index=True), - Column('execution_time', DateTime, nullable=False, index=True), - mysql_engine='InnoDB' - ) - - scheduled_operations = Table( - 'scheduled_operations', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean, nullable=False), - Column('id', String(length=36), primary_key=True, nullable=False), - Column('name', String(length=255), nullable=False), - Column('description', String(length=255)), - Column('operation_type', String(length=64), nullable=False), - Column('user_id', String(length=64), nullable=False), - Column('project_id', String(length=255), nullable=False), - Column('trigger_id', String(length=36), ForeignKey('triggers.id'), - index=True, nullable=False), - Column('operation_definition', Text, nullable=False), - Column('enabled', Boolean, nullable=False, default=True), - mysql_engine='InnoDB' - ) - - scheduled_operation_states = Table( - 'scheduled_operation_states', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean, nullable=False), - Column('id', Integer, primary_key=True, nullable=False, - autoincrement=True), - Column('operation_id', String(length=36), - ForeignKey('scheduled_operations.id', ondelete='CASCADE'), - index=True, unique=True, nullable=False), - Column('service_id', Integer, ForeignKey('services.id'), - nullable=False), - Column('trust_id', String(length=64), nullable=False), - Column('state', String(length=32), nullable=False), - Column('end_time_for_run', DateTime), - mysql_engine='InnoDB' - ) - - scheduled_operation_logs = Table( - 'scheduled_operation_logs', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean, nullable=False), - Column('id', Integer, primary_key=True, nullable=False, - autoincrement=True), - Column('operation_id', String(length=36), - ForeignKey('scheduled_operations.id', ondelete='CASCADE'), - index=True, nullable=False), - Column('expect_start_time', DateTime), - Column('triggered_time', DateTime), - Column('actual_start_time', DateTime), - Column('end_time', DateTime), - Column('state', String(length=32), nullable=False), - Column('extend_info', Text), - mysql_engine='InnoDB' - ) - - checkpoint_records = Table( - 'checkpoint_records', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean, nullable=False), - Column('id', String(length=36), primary_key=True, nullable=False), - Column('project_id', String(length=36), nullable=False), - Column('checkpoint_id', String(length=36), nullable=False), - Column('checkpoint_status', String(length=36), nullable=False), - Column('provider_id', String(length=36), nullable=False), - Column('plan_id', String(length=36), nullable=False), - Column('operation_id', String(length=36)), - Column('create_by', String(length=36)), - Column('extend_info', Text), - 
mysql_engine='InnoDB' - ) - - return [services, - plans, - resources, - restores, - operation_logs, - triggers, - trigger_executions, - scheduled_operations, - scheduled_operation_states, - scheduled_operation_logs, - checkpoint_records] - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - # create all tables - # Take care on create order for those with FK dependencies - tables = define_tables(meta) - - for table in tables: - table.create() - - if migrate_engine.name == "mysql": - table_names = [t.description for t in tables] - table_names.append("migrate_version") - - migrate_engine.execute("SET foreign_key_checks = 0") - for table in table_names: - migrate_engine.execute( - "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table) - migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table) - migrate_engine.execute("SET foreign_key_checks = 1") - migrate_engine.execute( - "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" % - migrate_engine.url.database) - - -def downgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - tables = define_tables(meta) - tables.reverse() - for table in tables: - table.drop() diff --git a/karbor/db/sqlalchemy/migrate_repo/versions/002_add_verification_table.py b/karbor/db/sqlalchemy/migrate_repo/versions/002_add_verification_table.py deleted file mode 100644 index f9ce9e42..00000000 --- a/karbor/db/sqlalchemy/migrate_repo/versions/002_add_verification_table.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Boolean, Column, DateTime -from sqlalchemy import MetaData, String, Table, Text - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - # New table - verifications = Table( - 'verifications', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', String(36), primary_key=True, nullable=False), - Column('project_id', String(length=255), nullable=False), - Column('provider_id', String(length=36), nullable=False), - Column('checkpoint_id', String(length=36), nullable=False), - Column('status', String(length=64)), - Column('parameters', Text), - Column('resources_status', Text), - Column('resources_reason', Text), - mysql_engine='InnoDB' - ) - - verifications.create() diff --git a/karbor/db/sqlalchemy/migrate_repo/versions/003_add_quotas_table.py b/karbor/db/sqlalchemy/migrate_repo/versions/003_add_quotas_table.py deleted file mode 100644 index 6590b3ce..00000000 --- a/karbor/db/sqlalchemy/migrate_repo/versions/003_add_quotas_table.py +++ /dev/null @@ -1,84 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Boolean, Column, DateTime, Integer -from sqlalchemy import MetaData, String, Table, ForeignKey - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - quotas = Table( - 'quotas', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True), - Column('project_id', String(length=255), nullable=False), - Column('resource', String(length=255), nullable=False), - Column('hard_limit', Integer), - mysql_engine='InnoDB' - ) - - quotas.create() - - quota_classes = Table( - 'quota_classes', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True), - Column('class_name', String(length=255), nullable=False), - Column('resource', String(length=255), nullable=False), - Column('hard_limit', Integer), - mysql_engine='InnoDB' - ) - quota_classes.create() - - quota_usages = Table( - 'quota_usages', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True), - Column('project_id', String(length=255), nullable=False), - Column('resource', String(length=255), nullable=False), - Column('in_use', Integer), - Column('reserved', Integer), - Column('until_refresh', Integer, nullable=True), - mysql_engine='InnoDB' - ) - - quota_usages.create() - - reservations = Table( - 'reservations', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True), - Column('uuid', String(length=36), nullable=False), - Column('usage_id', Integer, ForeignKey('quota_usages.id'), - nullable=False), - Column('project_id', String(length=255), index=True), - Column('resource', String(length=255)), - Column('delta', Integer, nullable=False), - Column('expire', DateTime), - mysql_engine='InnoDB' - ) - - reservations.create() diff --git a/karbor/db/sqlalchemy/migrate_repo/versions/__init__.py b/karbor/db/sqlalchemy/migrate_repo/versions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/db/sqlalchemy/models.py b/karbor/db/sqlalchemy/models.py deleted file mode 100644 index aa1c4b2d..00000000 --- a/karbor/db/sqlalchemy/models.py +++ /dev/null @@ -1,346 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -SQLAlchemy models for karbor data. 
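As a quick standalone sanity check of the quota schema in the 003 migration above, the same 'quotas' definition can be built against an in-memory SQLite engine with plain SQLAlchemy. This is illustrative only, outside the migrate repo, and the MySQL-only mysql_engine keyword is dropped:

    from sqlalchemy import (Boolean, Column, DateTime, Integer, MetaData,
                            String, Table, create_engine)

    meta = MetaData()
    quotas = Table(
        'quotas', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True),
        Column('project_id', String(255), nullable=False),
        Column('resource', String(255), nullable=False),
        Column('hard_limit', Integer),
    )

    engine = create_engine('sqlite://')   # throwaway in-memory database
    meta.create_all(engine)               # emits CREATE TABLE quotas
    with engine.connect() as conn:
        print(engine.dialect.has_table(conn, 'quotas'))  # True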
-""" - -from oslo_config import cfg -from oslo_db.sqlalchemy import models -from oslo_utils import timeutils -from sqlalchemy import Column, Integer, String, Text -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import DateTime, Boolean, ForeignKey -from sqlalchemy import orm - -CONF = cfg.CONF -BASE = declarative_base() - - -class KarborBase(models.TimestampMixin, - models.ModelBase): - """Base class for karbor Models.""" - - __table_args__ = {'mysql_engine': 'InnoDB'} - - deleted_at = Column(DateTime) - deleted = Column(Boolean, default=False) - metadata = None - - def delete(self, session): - """Delete this object.""" - self.deleted = True - self.deleted_at = timeutils.utcnow() - self.save(session=session) - - -class Service(BASE, KarborBase): - """Represents a running service on a host.""" - - __tablename__ = 'services' - id = Column(Integer, primary_key=True) - host = Column(String(255)) # , ForeignKey('hosts.id')) - binary = Column(String(255)) - topic = Column(String(255)) - report_count = Column(Integer, nullable=False, default=0) - disabled = Column(Boolean, default=False) - disabled_reason = Column(String(255)) - # adding column modified_at to contain timestamp - # for manual enable/disable of karbor services - # updated_at column will now contain timestamps for - # periodic updates - modified_at = Column(DateTime) - rpc_current_version = Column(String(36)) - rpc_available_version = Column(String(36)) - - -class Trigger(BASE, KarborBase): - """Represents a trigger.""" - - __tablename__ = 'triggers' - - id = Column(String(36), primary_key=True, nullable=False) - name = Column(String(255), nullable=False) - project_id = Column(String(255), nullable=False) - type = Column(String(64), nullable=False) - properties = Column(Text, nullable=False) - - -class TriggerExecution(BASE, KarborBase): - """Represents a future trigger execition""" - - __tablename__ = 'trigger_executions' - - id = Column(String(36), primary_key=True, nullable=False) - trigger_id = Column(String(36), unique=True, nullable=False, index=True) - execution_time = Column(DateTime, nullable=False, index=True) - - -class ScheduledOperation(BASE, KarborBase): - """Represents a scheduled operation.""" - - __tablename__ = 'scheduled_operations' - - id = Column(String(36), primary_key=True, nullable=False) - name = Column(String(255), nullable=False) - description = Column(String(255)) - operation_type = Column(String(64), nullable=False) - user_id = Column(String(64), nullable=False) - project_id = Column(String(255), nullable=False) - trigger_id = Column(String(36), ForeignKey('triggers.id'), - index=True, nullable=False) - operation_definition = Column(Text, nullable=False) - enabled = Column(Boolean, default=True) - - trigger = orm.relationship( - Trigger, - foreign_keys=trigger_id, - primaryjoin='and_(' - 'ScheduledOperation.trigger_id == Trigger.id,' - 'ScheduledOperation.deleted == 0,' - 'Trigger.deleted == 0)') - - -class ScheduledOperationState(BASE, KarborBase): - """Represents a scheduled operation state.""" - - __tablename__ = 'scheduled_operation_states' - - id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) - operation_id = Column(String(36), - ForeignKey('scheduled_operations.id', - ondelete='CASCADE'), - index=True, unique=True, - nullable=False) - service_id = Column(Integer, ForeignKey('services.id'), nullable=False) - trust_id = Column(String(64), nullable=False) - state = Column(String(32), nullable=False) - end_time_for_run = Column(DateTime) - - operation 
= orm.relationship( - ScheduledOperation, - foreign_keys=operation_id, - primaryjoin='and_(' - 'ScheduledOperationState.operation_id == ' - 'ScheduledOperation.id,' - 'ScheduledOperationState.deleted == 0,' - 'ScheduledOperation.deleted == 0)') - - -class ScheduledOperationLog(BASE, KarborBase): - """Represents a scheduled operation log.""" - - __tablename__ = 'scheduled_operation_logs' - - id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) - operation_id = Column(String(36), - ForeignKey('scheduled_operations.id', - ondelete='CASCADE'), - index=True, nullable=False) - expect_start_time = Column(DateTime) - triggered_time = Column(DateTime) - actual_start_time = Column(DateTime) - end_time = Column(DateTime) - state = Column(String(32), nullable=False) - extend_info = Column(Text) - - -class Plan(BASE, KarborBase): - """Represents a Plan.""" - - __tablename__ = 'plans' - id = Column(String(36), primary_key=True) - name = Column(String(255)) - description = Column(String(255)) - provider_id = Column(String(36)) - project_id = Column(String(255)) - status = Column(String(64)) - parameters = Column(Text) - - -class Resource(BASE, KarborBase): - """Represents a resource in a plan.""" - - __tablename__ = 'resources' - id = Column(Integer, primary_key=True) - resource_id = Column(String(36)) - resource_type = Column(String(64)) - resource_name = Column(String(255)) - resource_extra_info = Column(Text) - plan_id = Column(String(36), ForeignKey('plans.id'), nullable=False) - plan = orm.relationship(Plan, backref="resources", - foreign_keys=plan_id, - primaryjoin='and_(' - 'Resource.plan_id == Plan.id,' - 'Resource.deleted == False)') - - -class Restore(BASE, KarborBase): - """Represents a Restore.""" - - __tablename__ = 'restores' - id = Column(String(36), primary_key=True) - project_id = Column(String(255)) - provider_id = Column(String(36)) - checkpoint_id = Column(String(36)) - restore_target = Column(String(255)) - parameters = Column(Text) - status = Column(String(64)) - resources_status = Column(Text) - resources_reason = Column(Text) - - -class OperationLog(BASE, KarborBase): - """Represents a operation log.""" - - __tablename__ = 'operation_logs' - id = Column(String(36), primary_key=True) - project_id = Column(String(255)) - operation_type = Column(String(255)) - checkpoint_id = Column(String(36)) - plan_id = Column(String(36)) - provider_id = Column(String(36)) - restore_id = Column(String(36)) - scheduled_operation_id = Column(String(36)) - status = Column(String(64)) - started_at = Column(DateTime) - ended_at = Column(DateTime) - error_info = Column(Text) - extra_info = Column(Text) - - -class Verification(BASE, KarborBase): - """Represents a Verification.""" - - __tablename__ = 'verifications' - id = Column(String(36), primary_key=True) - project_id = Column(String(255)) - provider_id = Column(String(36)) - checkpoint_id = Column(String(36)) - parameters = Column(Text) - status = Column(String(64)) - resources_status = Column(Text) - resources_reason = Column(Text) - - -class CheckpointRecord(BASE, KarborBase): - """Represents a checkpoint record.""" - - __tablename__ = 'checkpoint_records' - - id = Column(String(36), primary_key=True, nullable=False) - project_id = Column(String(36), nullable=False) - checkpoint_id = Column(String(36), nullable=False) - checkpoint_status = Column(String(36), nullable=False) - provider_id = Column(String(36), nullable=False) - plan_id = Column(String(36), nullable=False) - operation_id = Column(String(36)) - create_by = 
Column(String(36)) - extend_info = Column(Text) - - -class Quota(BASE, KarborBase): - """Represents a single quota override for a project. - - If there is no row for a given project id and resource, then the - default for the quota class is used. If there is no row for a - given quota class and resource, then the default for the - deployment is used. If the row is present but the hard limit is - Null, then the resource is unlimited. - """ - - __tablename__ = 'quotas' - id = Column(Integer, primary_key=True) - - project_id = Column(String(255), index=True) - - resource = Column(String(255)) - hard_limit = Column(Integer, nullable=True) - - -class QuotaClass(BASE, KarborBase): - """Represents a single quota override for a quota class. - - If there is no row for a given quota class and resource, then the - default for the deployment is used. If the row is present but the - hard limit is Null, then the resource is unlimited. - """ - - __tablename__ = 'quota_classes' - id = Column(Integer, primary_key=True) - - class_name = Column(String(255), index=True) - - resource = Column(String(255)) - hard_limit = Column(Integer, nullable=True) - - -class QuotaUsage(BASE, KarborBase): - """Represents the current usage for a given resource.""" - - __tablename__ = 'quota_usages' - id = Column(Integer, primary_key=True) - - project_id = Column(String(255), index=True) - resource = Column(String(255)) - - in_use = Column(Integer) - reserved = Column(Integer) - - @property - def total(self): - return self.in_use + self.reserved - - until_refresh = Column(Integer, nullable=True) - - -class Reservation(BASE, KarborBase): - """Represents a resource reservation for quotas.""" - - __tablename__ = 'reservations' - id = Column(Integer, primary_key=True) - uuid = Column(String(36), nullable=False) - - usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False) - - project_id = Column(String(255), index=True) - resource = Column(String(255)) - - delta = Column(Integer) - expire = Column(DateTime, nullable=False) - - -def register_models(): - """Register Models and create metadata. - - Called from karbor.db.sqlalchemy.__init__ as part of loading the driver, - it will never need to be called explicitly elsewhere unless the - connection is lost and needs to be reestablished. - """ - from sqlalchemy import create_engine - models = (Service, - Plan, - Resource, - Trigger, - TriggerExecution, - ScheduledOperation, - ScheduledOperationState, - ScheduledOperationLog, - Restore, - Verification, - CheckpointRecord, - Quota, - QuotaClass, - QuotaUsage, - Reservation) - engine = create_engine(CONF.database.connection, echo=False) - for model in models: - model.metadata.create_all(engine) diff --git a/karbor/exception.py b/karbor/exception.py deleted file mode 100644 index 7d3e96fa..00000000 --- a/karbor/exception.py +++ /dev/null @@ -1,463 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""karbor base exception handling. - -Includes decorator for re-raising karbor-type exceptions. 
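The fallback order documented on the Quota and QuotaClass models above (project override, then quota-class default, then the deployment default, with a None hard limit meaning unlimited) can be summarized with a small hypothetical helper; none of these names are karbor APIs:

    # Deployment-wide defaults (illustrative values only).
    DEPLOYMENT_DEFAULTS = {'plans': 50, 'checkpoints': 100}

    def effective_limit(resource, project_rows, class_rows):
        # Most specific row wins; a stored None means "unlimited".
        for rows in (project_rows, class_rows):
            if resource in rows:
                return rows[resource]
        return DEPLOYMENT_DEFAULTS.get(resource)

    print(effective_limit('plans', {'plans': 10}, {'plans': 20}))  # 10
    print(effective_limit('checkpoints', {}, {}))                  # 100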
- -SHOULD include dedicated exception logging. - -""" - -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_versionedobjects import exception as obj_exc -import six -import webob.exc -from webob.util import status_generic_reasons -from webob.util import status_reasons - -from six.moves import http_client - -from karbor.i18n import _ - - -LOG = logging.getLogger(__name__) - -exc_log_opts = [ - cfg.BoolOpt('fatal_exception_format_errors', - default=False, - help='Make exception message format errors fatal.'), -] - -CONF = cfg.CONF -CONF.register_opts(exc_log_opts) - - -class ConvertedException(webob.exc.WSGIHTTPException): - def __init__(self, code=500, title="", - explanation=""): - self.code = code - # There is a strict rule about constructing status line for HTTP: - # '...Status-Line, consisting of the protocol version followed by a - # numeric status code and its associated textual phrase, with each - # element separated by SP characters' - # (http://www.faqs.org/rfcs/rfc2616.html) - # 'code' and 'title' can not be empty because they correspond - # to numeric status code and its associated text - if title: - self.title = title - else: - try: - self.title = status_reasons[self.code] - except KeyError: - generic_code = self.code // 100 - self.title = status_generic_reasons[generic_code] - self.explanation = explanation - super(ConvertedException, self).__init__() - - -class Error(Exception): - pass - - -class KarborException(Exception): - """Base karbor Exception - - To correctly use this class, inherit from it and define - a 'message' property. That message will get printf'd - with the keyword arguments provided to the constructor. - - """ - message = _("An unknown exception occurred.") - code = http_client.INTERNAL_SERVER_ERROR - headers = {} - safe = True - - def __init__(self, message=None, **kwargs): - """Initialize an instance of KarborException. - - There are two ways to initialize the instance. - 1. Specify the value of 'message' and leave 'kwargs' empty. - 2. Leave 'message' None, and specify keyword arguments that match - the format of KarborException.message. Note that 'message' - cannot be used as a key in 'kwargs'; it would be taken as the - first positional argument ('message') instead. - - Note: this class does not support creating an instance of - KarborException from another exception instance. 
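The two construction modes described in this docstring boil down to the following pattern; this is a simplified standalone rendition, not the karbor class itself:

    class FakeBaseException(Exception):
        # Subclasses override this printf-style template.
        message = "An unknown exception occurred."

        def __init__(self, message=None, **kwargs):
            if not message:
                message = self.message % kwargs  # mode 2: kwargs fill the template
            self.msg = message                   # mode 1: explicit message wins
            super().__init__(message)

    class FakePlanNotFound(FakeBaseException):
        message = "Plan %(plan_id)s could not be found."

    print(FakePlanNotFound(plan_id='abc'))  # Plan abc could not be found.
    print(FakePlanNotFound('custom text'))  # custom text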
- """ - self.kwargs = kwargs - - if 'code' not in self.kwargs: - try: - self.kwargs['code'] = self.code - except AttributeError: - pass - - if not message: - try: - message = self.message % kwargs - - except Exception: - exc_info = sys.exc_info() - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception('Exception in string format operation') - for name, value in kwargs.items(): - LOG.error("%(name)s: %(value)s", - {'name': name, 'value': value}) - if CONF.fatal_exception_format_errors: - six.reraise(*exc_info) - # at least get the core message out if something happened - message = self.message - elif isinstance(message, Exception): - message = six.text_type(message) - - # NOTE(luisg): We put the actual message in 'msg' so that we can access - # it, because if we try to access the message via 'message' it will be - # overshadowed by the class' message attribute - self.msg = message - super(KarborException, self).__init__(message) - - def __unicode__(self): - return self.msg - - -class NotAuthorized(KarborException): - message = _("Not authorized.") - code = http_client.FORBIDDEN - - -class AdminRequired(NotAuthorized): - message = _("User does not have admin privileges") - - -class PolicyNotAuthorized(NotAuthorized): - message = _("Policy doesn't allow %(action)s to be performed.") - - -class AuthorizationFailure(NotAuthorized): - message = _("Authorization for %(obj)s is failed ") - - -class Invalid(KarborException): - message = _("Unacceptable parameters.") - code = http_client.BAD_REQUEST - - -class InvalidParameterValue(Invalid): - message = _("%(err)s") - - -class InvalidInput(Invalid): - message = _("Invalid input received: %(reason)s") - - -class ScheduledOperationExist(Invalid): - message = _("Scheduled Operation%(op_id)s exists") - - -class NotFound(KarborException): - message = _("Resource could not be found.") - code = http_client.NOT_FOUND - safe = True - - -class ConfigNotFound(NotFound): - message = _("Could not find config at %(path)s") - - -class MalformedRequestBody(KarborException): - message = _("Malformed message body: %(reason)s") - - -class InvalidContentType(Invalid): - message = _("Invalid content type %(content_type)s.") - - -class InvalidProtectableInstance(Invalid): - message = _("Invalid protectable instance.") - - -class PasteAppNotFound(NotFound): - message = _("Could not load paste app '%(name)s' from %(path)s") - - -class ServiceNotFound(NotFound): - message = _("Service %(service_id)s could not be found.") - - -class HostBinaryNotFound(NotFound): - message = _("Could not find binary %(binary)s on host %(host)s.") - - -class TriggerNotFound(NotFound): - message = _("Trigger %(id)s could not be found.") - - -class ScheduledOperationNotFound(NotFound): - message = _("Scheduled Operation %(id)s could not be found.") - - -class ScheduledOperationStateNotFound(NotFound): - message = _("Scheduled Operation State %(op_id)s could not be found.") - - -class ScheduledOperationLogNotFound(NotFound): - message = _("Scheduled Operation Log %(log_id)s could not be found.") - - -class ListProtectableResourceFailed(KarborException): - message = _("List protectable resources of type %(type)s failed: " - "%(reason)s") - - -class ProtectableResourceNotFound(NotFound): - message = _("The resource %(id)s of type %(type)s could not be found: " - "%(reason)s") - - -class ProtectableResourceInvalidStatus(KarborException): - message = _("The resource %(id)s of type %(type)s has a invalid " - "status: %(status)s") - - -class 
InvalidOperationObject(Invalid): - message = _("The operation %(operation_id)s is invalid") - - -class DeleteTriggerNotAllowed(NotAuthorized): - message = _("Can not delete trigger %(trigger_id)s") - - -class AccessCheckpointNotAllowed(NotAuthorized): - message = _("Access checkpoint %(checkpoint_id)s is not allowed") - - -class DeleteCheckpointNotAllowed(NotAuthorized): - message = _("Delete checkpoint %(checkpoint_id)s is not allowed") - - -class ClassNotFound(NotFound): - message = _("Class %(class_name)s could not be found: %(exception)s") - - -class InvalidOperationDefinition(Invalid): - message = _("Invalid operation definition, reason:%(reason)s") - - -OrphanedObjectError = obj_exc.OrphanedObjectError -ObjectActionError = obj_exc.ObjectActionError - - -class PlanNotFound(NotFound): - message = _("Plan %(plan_id)s could not be found.") - - -class OperationLogFound(NotFound): - message = _("Operation log %(operation_log_id)s could not be found.") - - -class RestoreNotFound(NotFound): - message = _("Restore %(restore_id)s could not be found.") - - -class VerificationNotFound(NotFound): - message = _("Verification %(verification_id)s could not be found.") - - -class OperationLogNotFound(NotFound): - message = _("OperationLog %(restore_id)s could not be found.") - - -class InvalidPlan(Invalid): - message = _("Invalid plan: %(reason)s") - - -class ProtectableTypeNotFound(NotFound): - message = _("ProtectableType %(protectable_type)s could" - " not be found.") - - -class ProtectionPluginNotFound(NotFound): - message = _("Protection Plugin for %(type)s could" - " not be found.") - - -class ProviderNotFound(NotFound): - message = _("Provider %(provider_id)s could" - " not be found.") - - -class CheckpointRecordNotFound(NotFound): - message = _("CheckpointRecord %(id)s could not be found.") - - -class CreateResourceFailed(KarborException): - message = _("Create %(name)s failed: %(reason)s, id=%(resource_id)s," - " type=%(resource_type)s") - - -class DeleteResourceFailed(KarborException): - message = _("Delete %(name)s failed: %(reason)s, id=%(resource_id)s," - " type=%(resource_type)s") - - -class RestoreResourceFailed(KarborException): - message = _("Restore %(name)s failed: %(reason)s, id=%(resource_id)s," - " type=%(resource_type)s") - - -class VerifyResourceFailed(KarborException): - message = _("Verify %(name)s failed: %(reason)s, id=%(resource_id)s," - " type=%(resource_type)s") - - -class FlowError(KarborException): - message = _("Flow: %(flow)s, Error: %(error)s") - - -class CheckpointNotFound(NotFound): - message = _("Checkpoint %(checkpoint_id)s could" - " not be found.") - - -class BankCreateObjectFailed(KarborException): - message = _("Create Object in Bank Failed: %(reason)s") - - -class BankUpdateObjectFailed(KarborException): - message = _("Update Object %(key)s in Bank Failed: %(reason)s") - - -class BankDeleteObjectFailed(KarborException): - message = _("Delete Object %(key)s in Bank Failed: %(reason)s") - - -class BankGetObjectFailed(KarborException): - message = _("Get Object %(key)s in Bank Failed: %(reason)s") - - -class BankListObjectsFailed(KarborException): - message = _("List Objects in Bank Failed: %(reason)s") - - -class BankReadonlyViolation(KarborException): - message = _("Bank read-only violation") - - -class AcquireLeaseFailed(KarborException): - message = _("Acquire Lease Failed: %(reason)s") - - -class CreateContainerFailed(KarborException): - message = _("Create Container in Bank Failed: %(reason)s") - - -class CreateBucketFailed(KarborException): - message 
= _("Create Bucket in Bank Failed: %(reason)s") - - -class TriggerIsInvalid(Invalid): - message = _("Trigger%(trigger_id)s is invalid.") - - -class InvalidTaskFlowObject(Invalid): - message = _("The task flow is invalid: %(reason)s") - - -class InvalidOriginalId(Invalid): - message = _("The original_id: %(original_id)s is invalid.") - - -class CheckpointNotAvailable(KarborException): - message = _("The checkpoint %(checkpoint_id)s is not available") - - -class CheckpointNotBeDeleted(KarborException): - message = _("The checkpoint %(checkpoint_id)s can not be deleted.") - - -class CheckpointNotBeReset(KarborException): - message = _("The checkpoint %(checkpoint_id)s can not be reset.") - - -class GetProtectionNetworkSubResourceFailed(KarborException): - message = _("Get protection network sub-resources of type %(type)s failed:" - " %(reason)s") - - -class QuotaNotFound(NotFound): - message = _("Quota could not be found") - - -class QuotaResourceUnknown(QuotaNotFound): - message = _("Unknown quota resources %(unknown)s.") - - -class ProjectQuotaNotFound(QuotaNotFound): - message = _("Quota for project %(project_id)s could not be found.") - - -class QuotaClassNotFound(QuotaNotFound): - message = _("Quota class %(class_name)s could not be found.") - - -class QuotaUsageNotFound(QuotaNotFound): - message = _("Quota usage for project %(project_id)s could not be found.") - - -class ReservationNotFound(QuotaNotFound): - message = _("Quota reservation %(uuid)s could not be found.") - - -class OverQuota(KarborException): - message = _("Quota exceeded for resources: %(overs)s") - - -class InvalidReservationExpiration(Invalid): - message = _("Invalid reservation expiration %(expire)s.") - - -class InvalidQuotaValue(Invalid): - message = _("Change would make usage less than 0 for the following " - "resources: %(unders)s.") - - -class QuotaError(KarborException): - message = _("Quota exceeded: code=%(code)s") - code = 413 - headers = {'Retry-After': '0'} - safe = True - - -class PlanLimitExceeded(QuotaError): - message = _("Maximum number of plans allowed (%(allowed)d) exceeded") - - -class CheckpointLimitExceeded(QuotaError): - message = _("Maximum number of checkpoints allowed (%(allowed)d) exceeded") - - -class UnexpectedOverQuota(QuotaError): - message = _("Unexpected over quota on %(name)s.") - - -class InvalidName(Invalid): - message = _("An invalid 'name' value was provided. %(reason)s") - - -class ValidationError(Invalid): - message = "%(detail)s" diff --git a/karbor/i18n.py b/karbor/i18n.py deleted file mode 100644 index f77de2e0..00000000 --- a/karbor/i18n.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. - -See https://docs.openstack.org/oslo.i18n/latest/user/usage.html . 
- -""" - -import oslo_i18n as i18n - -DOMAIN = 'karbor' - -_translators = i18n.TranslatorFactory(domain=DOMAIN) - -# The primary translation function using the well-known name "_" -_ = _translators.primary - - -def enable_lazy(enable=True): - return i18n.enable_lazy(enable) - - -def translate(value, user_locale=None): - return i18n.translate(value, user_locale) - - -def get_available_languages(): - return i18n.get_available_languages(DOMAIN) diff --git a/karbor/loadables.py b/karbor/loadables.py deleted file mode 100644 index fc7979c4..00000000 --- a/karbor/loadables.py +++ /dev/null @@ -1,124 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Generic Loadable class support. - -Meant to be used by such things as scheduler filters and weights where we -want to load modules from certain directories and find certain types of -classes within those modules. Note that this is quite different than -generic plugins and the pluginmanager code that exists elsewhere. - -Usage: - -Create a directory with an __init__.py with code such as: - -class SomeLoadableClass(object): - pass - - -class MyLoader(nova.loadables.BaseLoader) - def __init__(self): - super(MyLoader, self).__init__(SomeLoadableClass) - -If you create modules in the same directory and subclass SomeLoadableClass -within them, MyLoader().get_all_classes() will return a list -of such classes. -""" - -import inspect -import os -import sys - -from oslo_utils import importutils - -from karbor import exception - - -class BaseLoader(object): - def __init__(self, loadable_cls_type): - super(BaseLoader, self).__init__() - mod = sys.modules[self.__class__.__module__] - self.path = os.path.abspath(mod.__path__[0]) - self.package = mod.__package__ - self.loadable_cls_type = loadable_cls_type - - def _is_correct_class(self, obj): - """Check the obj whether it is the right class. - - Return whether an object is a class of the correct type and - is not prefixed with an underscore. - """ - - return (inspect.isclass(obj) and - (not obj.__name__.startswith('_')) and - issubclass(obj, self.loadable_cls_type) and - (obj is not self.loadable_cls_type)) - - def _get_classes_from_module(self, module_name): - """Get the classes from a module that match the type we want.""" - classes = [] - module = importutils.import_module(module_name) - for obj_name in dir(module): - # Skip objects that are meant to be private. - if obj_name.startswith('_'): - continue - itm = getattr(module, obj_name) - if self._is_correct_class(itm): - classes.append(itm) - return classes - - def get_all_classes(self): - """Get all classes. - - Get the classes of the type we want from all modules found - in the directory that defines this class. 
- """ - - classes = [] - for dirpath, dirnames, filenames in os.walk(self.path): - relpath = os.path.relpath(dirpath, self.path) - if relpath == '.': - relpkg = '' - else: - relpkg = '.%s' % '.'.join(relpath.split(os.sep)) - for fname in filenames: - root, ext = os.path.splitext(fname) - if ext != '.py' or root == '__init__': - continue - module_name = "%s%s.%s" % (self.package, relpkg, root) - mod_classes = self._get_classes_from_module(module_name) - classes.extend(mod_classes) - return classes - - def get_matching_classes(self, loadable_class_names): - """Get loadable classes from a list of names. - - Each name can be a full module path or the full path to a method that - returns classes to use. The latter behavior is useful to specify a - method that returns a list of classes to use in a default case. - """ - - classes = [] - for cls_name in loadable_class_names: - obj = importutils.import_class(cls_name) - if self._is_correct_class(obj): - classes.append(obj) - elif inspect.isfunction(obj): - # Get list of classes from a function - for cls in obj(): - classes.append(cls) - else: - error_str = 'Not a class of the correct type' - raise exception.ClassNotFound(class_name=cls_name, - exception=error_str) - return classes diff --git a/karbor/manager.py b/karbor/manager.py deleted file mode 100644 index 1150a278..00000000 --- a/karbor/manager.py +++ /dev/null @@ -1,117 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Base Manager class. - -Managers are responsible for a certain aspect of the system. It is a logical -grouping of code relating to a portion of the system. In general other -components should be using the manager to make changes to the components that -it is responsible for. - -We have adopted a basic strategy of Smart managers and dumb data, which means -rather than attaching methods to data objects, components should call manager -methods that act on the data. - -Methods on managers that can be executed locally should be called directly. If -a particular method must execute on a remote host, this should be done via rpc -to the service that wraps the manager - -Managers should be responsible for most of the db access, and -non-implementation specific data. Anything implementation specific that can't -be generalized should be done by the Driver. - -Managers will often provide methods for initial setup of a host or periodic -tasks to a wrapping service. - -This module provides Manager, a base class for managers. - -""" - - -from oslo_config import cfg -import oslo_messaging as messaging -from oslo_service import periodic_task - -from karbor.db import base -from karbor import version - - -CONF = cfg.CONF - - -class PeriodicTasks(periodic_task.PeriodicTasks): - def __init__(self): - super(PeriodicTasks, self).__init__(CONF) - - -class Manager(base.Base, PeriodicTasks): - # Set RPC API version to 1.0 by default. 
- RPC_API_VERSION = '1.0' - - target = messaging.Target(version=RPC_API_VERSION) - - def __init__(self, host=None, db_driver=None): - if not host: - host = CONF.host - self.host = host - self.additional_endpoints = [] - super(Manager, self).__init__(db_driver) - - def periodic_tasks(self, context, raise_on_error=False): - """Tasks to be run at a periodic interval.""" - return self.run_periodic_tasks(context, raise_on_error=raise_on_error) - - def init_host(self, **kwargs): - """Handle initialization if this is a standalone service. - - A hook point for services to execute tasks before the services are made - available (i.e. showing up on RPC and starting to accept RPC calls) to - other components. Child classes should override this method. - - """ - pass - - def cleanup_host(self): - """Hook to do cleanup work when the service shuts down. - - Child classes should override this method. - """ - pass - - def init_host_with_rpc(self): - """A hook for service to do jobs after RPC is ready. - - Like init_host(), this method is a hook where services get a chance - to execute tasks that *need* RPC. Child classes should override - this method. - - """ - pass - - def service_version(self): - return version.version_string() - - def service_config(self): - config = {} - for key in CONF: - config[key] = CONF.get(key, None) - return config - - def is_working(self): - """Method indicating if service is working correctly. - - This method is supposed to be overridden by subclasses and return if - manager is working correctly. - """ - return True diff --git a/karbor/objects/__init__.py b/karbor/objects/__init__.py deleted file mode 100644 index f08f1f34..00000000 --- a/karbor/objects/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def register_all(): - # You must make sure your object gets imported in this - # function in order for it to be registered by services that may - # need to receive it via RPC. - __import__('karbor.objects.service') - __import__('karbor.objects.plan') - __import__('karbor.objects.scheduled_operation') - __import__('karbor.objects.trigger') - __import__('karbor.objects.scheduled_operation_log') - __import__('karbor.objects.scheduled_operation_state') - __import__('karbor.objects.restore') - __import__('karbor.objects.operation_log') - __import__('karbor.objects.checkpoint_record') - __import__('karbor.objects.verification') diff --git a/karbor/objects/base.py b/karbor/objects/base.py deleted file mode 100644 index 21c25e39..00000000 --- a/karbor/objects/base.py +++ /dev/null @@ -1,211 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""karbor common internal object model""" - -import contextlib -import datetime - -from oslo_log import log as logging -from oslo_versionedobjects import base -from oslo_versionedobjects import fields - -from karbor import db -from karbor.db.sqlalchemy import models -from karbor import exception -from karbor.i18n import _ -from karbor import objects - - -LOG = logging.getLogger('object') -remotable = base.remotable -remotable_classmethod = base.remotable_classmethod -obj_make_list = base.obj_make_list - - -class KarborObjectRegistry(base.VersionedObjectRegistry): - def registration_hook(self, cls, index): - setattr(objects, cls.obj_name(), cls) - # For Versioned Object Classes that have a model store the model in - # a Class attribute named model - try: - model_name = cls.obj_name() - cls.model = getattr(models, model_name) - except (ImportError, AttributeError): - pass - - -class KarborObject(base.VersionedObject): - OBJ_SERIAL_NAMESPACE = 'karbor_object' - OBJ_PROJECT_NAMESPACE = 'karbor' - - def karbor_obj_get_changes(self): - """Returns a dict of changed fields with tz unaware datetimes. - - Any timezone aware datetime field will be converted to UTC timezone - and returned as timezone unaware datetime. - - This will allow us to pass these fields directly to a db update - method as they can't have timezone information. - """ - # Get dirtied/changed fields - changes = self.obj_get_changes() - - # Look for datetime objects that contain timezone information - for k, v in changes.items(): - if isinstance(v, datetime.datetime) and v.tzinfo: - # Remove timezone information and adjust the time according to - # the timezone information's offset. - changes[k] = v.replace(tzinfo=None) - v.utcoffset() - - # Return modified dict - return changes - - @base.remotable_classmethod - def get_by_id(cls, context, id, *args, **kwargs): - # To get by id we need to have a model and for the model to - # have an id field - if 'id' not in cls.fields: - msg = (_('VersionedObject %s cannot retrieve object by id.') % - (cls.obj_name())) - raise NotImplementedError(msg) - - model = getattr(models, cls.obj_name()) - orm_obj = db.get_by_id(context, model, id, *args, **kwargs) - kargs = {} - if hasattr(cls, 'DEFAULT_EXPECTED_ATTR'): - kargs = {'expected_attrs': getattr(cls, 'DEFAULT_EXPECTED_ATTR')} - return cls._from_db_object(context, cls(context), orm_obj, **kargs) - - def refresh(self): - # To refresh we need to have a model and for the model to have an id - # field - if 'id' not in self.fields: - msg = (_('VersionedObject %s cannot retrieve object by id.') % - (self.obj_name())) - raise NotImplementedError(msg) - - current = self.get_by_id(self._context, self.id) - - for field in self.fields: - # Only update attributes that are already set. We do not want to - # unexpectedly trigger a lazy-load. - if self.obj_attr_is_set(field): - if self[field] != current[field]: - self[field] = current[field] - self.obj_reset_changes() - - def __contains__(self, name): - # We're using obj_extra_fields to provide aliases for some fields while - # in transition period. This override is to make these aliases pass - # "'foo' in obj" tests. - return name in self.obj_extra_fields or super(KarborObject, - self).__contains__(name) - - -class KarborObjectDictCompat(base.VersionedObjectDictCompat): - """Mix-in to provide dictionary key access compat. 
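The datetime handling in karbor_obj_get_changes() above is easy to check in isolation: stripping tzinfo and subtracting the UTC offset yields the same instant as a timezone-unaware UTC value:

    from datetime import datetime, timedelta, timezone

    aware = datetime(2020, 1, 1, 12, 0,
                     tzinfo=timezone(timedelta(hours=8)))   # 12:00 UTC+8
    naive_utc = aware.replace(tzinfo=None) - aware.utcoffset()
    print(naive_utc)  # 2020-01-01 04:00:00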
- - If an object needs to support attribute access using - dictionary items instead of object attributes, inherit - from this class. This should only be used as a temporary - measure until all callers are converted to use modern - attribute access. - - NOTE(berrange) This class will eventually be deleted. - """ - - def get(self, key, value=base._NotSpecifiedSentinel): - """For backwards-compatibility with dict-based objects. - - NOTE(danms): May be removed in the future. - """ - if key not in self.obj_fields: - # NOTE(jdg): There are a number of places where we rely on the - # old dictionary version and do a get(xxx, None). - # The following preserves that compatibility but in - # the future we'll remove this shim altogether so don't - # rely on it. - LOG.debug('Karbor object %(object_name)s has no ' - 'attribute named: %(attribute_name)s', - {'object_name': self.__class__.__name__, - 'attribute_name': key}) - return None - if (value != base._NotSpecifiedSentinel and - not self.obj_attr_is_set(key)): - return value - else: - try: - return getattr(self, key) - except (exception.ObjectActionError, NotImplementedError): - # Exception when haven't set a value for non-lazy - # loadable attribute, but to mimic typical dict 'get' - # behavior we should still return None - return None - - -def DateTimeField(**kwargs): - return fields.DateTimeField(tzinfo_aware=False, **kwargs) - - -class KarborPersistentObject(object): - """Mixin class for Persistent objects. - - This adds the fields that we use in common for all persistent objects. - """ - fields = { - 'created_at': DateTimeField(nullable=True), - 'updated_at': DateTimeField(nullable=True), - 'deleted_at': DateTimeField(nullable=True), - 'deleted': fields.BooleanField(default=False), - } - - @contextlib.contextmanager - def obj_as_admin(self): - """Context manager to make an object call as an admin. - - This temporarily modifies the context embedded in an object to - be elevated() and restores it after the call completes. Example - usage: - - with obj.obj_as_admin(): - obj.save() - """ - if self._context is None: - raise exception.OrphanedObjectError(method='obj_as_admin', - objtype=self.obj_name()) - - original_context = self._context - self._context = self._context.elevated() - try: - yield - finally: - self._context = original_context - - -class KarborComparableObject(base.ComparableVersionedObject): - def __eq__(self, obj): - if hasattr(obj, 'obj_to_primitive'): - return self.obj_to_primitive() == obj.obj_to_primitive() - return False - - -class ObjectListBase(base.ObjectListBase): - pass - - -class KarborObjectSerializer(base.VersionedObjectSerializer): - OBJ_BASE_CLASS = KarborObject - - -class DictOfDictOfStringsField(fields.AutoTypedField): - AUTO_TYPE = fields.Dict(fields.Dict(fields.String(), nullable=True)) diff --git a/karbor/objects/checkpoint_record.py b/karbor/objects/checkpoint_record.py deleted file mode 100644 index beebc4f9..00000000 --- a/karbor/objects/checkpoint_record.py +++ /dev/null @@ -1,100 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_versionedobjects import fields - -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor.objects import base - -CONF = cfg.CONF - - -@base.KarborObjectRegistry.register -class CheckpointRecord(base.KarborPersistentObject, base.KarborObject, - base.KarborObjectDictCompat, - base.KarborComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'id': fields.UUIDField(), - 'project_id': fields.UUIDField(), - 'checkpoint_id': fields.UUIDField(), - 'checkpoint_status': fields.StringField(), - 'provider_id': fields.UUIDField(), - 'plan_id': fields.UUIDField(), - 'operation_id': fields.UUIDField(nullable=True), - 'create_by': fields.StringField(nullable=True), - 'extend_info': fields.StringField(nullable=True), - } - - @staticmethod - def _from_db_object(context, checkpoint_record, db_checkpoint_record): - for name, field in checkpoint_record.fields.items(): - checkpoint_record[name] = db_checkpoint_record.get(name) - - checkpoint_record._context = context - checkpoint_record.obj_reset_changes() - return checkpoint_record - - @base.remotable_classmethod - def get_by_id(cls, context, id): - db_checkpoint_record = db.checkpoint_record_get(context, id) - if db_checkpoint_record: - return cls._from_db_object(context, cls(), db_checkpoint_record) - - @base.remotable - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - updates = self.karbor_obj_get_changes() - db_checkpoint_record = db.checkpoint_record_create(self._context, - updates) - self._from_db_object(self._context, self, db_checkpoint_record) - - @base.remotable - def save(self): - updates = self.karbor_obj_get_changes() - if updates and self.id: - db.checkpoint_record_update(self._context, self.id, updates) - self.obj_reset_changes() - - @base.remotable - def destroy(self): - if self.id: - db.checkpoint_record_destroy(self._context, self.id) - - -@base.KarborObjectRegistry.register -class CheckpointRecordList(base.ObjectListBase, base.KarborObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('CheckpointRecord'), - } - - @base.remotable_classmethod - def get_by_filters(cls, context, filters, limit=None, - marker=None, sort_keys=None, sort_dirs=None): - - checkpoint_record_list = db.checkpoint_record_get_all_by_filters_sort( - context, filters, limit=limit, marker=marker, - sort_keys=sort_keys, sort_dirs=sort_dirs) - - return base.obj_make_list(context, - cls(context), - CheckpointRecord, - checkpoint_record_list) diff --git a/karbor/objects/operation_log.py b/karbor/objects/operation_log.py deleted file mode 100644 index 984b70e4..00000000 --- a/karbor/objects/operation_log.py +++ /dev/null @@ -1,108 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
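Taken together, the remotable methods above give CheckpointRecord a create/save/destroy lifecycle. A usage sketch, assuming a deployed karbor with a configured database (the IDs are placeholders, and get_admin_context is assumed to come from karbor.context):

    from karbor import context as karbor_context
    from karbor import objects

    objects.register_all()
    ctxt = karbor_context.get_admin_context()

    record = objects.CheckpointRecord(
        context=ctxt,
        project_id='3b1e...', checkpoint_id='8f0d...',
        checkpoint_status='protecting',
        provider_id='cf56...', plan_id='9a21...')
    record.create()                          # INSERT
    record.checkpoint_status = 'available'
    record.save()                            # UPDATE of changed fields only
    record.destroy()                         # soft delete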
- -from oslo_versionedobjects import fields - -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.objects import base - - -@base.KarborObjectRegistry.register -class OperationLog(base.KarborPersistentObject, base.KarborObject, - base.KarborObjectDictCompat, - base.KarborComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'id': fields.UUIDField(), - 'project_id': fields.UUIDField(), - 'operation_type': fields.StringField(), - 'checkpoint_id': fields.UUIDField(nullable=True), - 'plan_id': fields.UUIDField(nullable=True), - 'provider_id': fields.UUIDField(nullable=True), - 'restore_id': fields.UUIDField(nullable=True), - 'scheduled_operation_id': fields.UUIDField(nullable=True), - 'status': fields.StringField(nullable=True), - 'started_at': fields.DateTimeField(nullable=True), - 'ended_at': fields.DateTimeField(nullable=True), - 'error_info': fields.StringField(nullable=True), - 'extra_info': fields.StringField(nullable=True), - } - - @staticmethod - def _from_db_object(context, operation_log, db_operation_log): - for name, field in operation_log.fields.items(): - value = db_operation_log.get(name) - if isinstance(field, fields.IntegerField): - value = value or 0 - elif isinstance(field, fields.DateTimeField): - value = value or None - operation_log[name] = value - - operation_log._context = context - operation_log.obj_reset_changes() - return operation_log - - @base.remotable - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - updates = self.karbor_obj_get_changes() - db_operation_log = db.operation_log_create(self._context, updates) - self._from_db_object(self._context, self, db_operation_log) - - @base.remotable - def save(self): - updates = self.karbor_obj_get_changes() - if updates: - db.operation_log_update(self._context, self.id, updates) - self.obj_reset_changes() - - @base.remotable - def destroy(self): - with self.obj_as_admin(): - db.operation_log_destroy(self._context, self.id) - - -@base.KarborObjectRegistry.register -class OperationLogList(base.ObjectListBase, base.KarborObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('OperationLog'), - } - - @base.remotable_classmethod - def get_all(cls, context, marker, limit, sort_keys=None, sort_dirs=None, - filters=None, offset=None): - operation_logs = db.operation_log_get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - return base.obj_make_list(context, cls(context), objects.OperationLog, - operation_logs) - - @base.remotable_classmethod - def get_all_by_project(cls, context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, filters=None, - offset=None): - operation_logs = db.operation_log_get_all_by_project( - context, project_id, marker, limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, filters=filters, offset=offset) - return base.obj_make_list(context, cls(context), objects.OperationLog, - operation_logs) diff --git a/karbor/objects/plan.py b/karbor/objects/plan.py deleted file mode 100644 index 389314b5..00000000 --- a/karbor/objects/plan.py +++ /dev/null @@ -1,181 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_serialization import jsonutils -from oslo_versionedobjects import fields - -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.objects import base - - -@base.KarborObjectRegistry.register -class Plan(base.KarborPersistentObject, base.KarborObject, - base.KarborObjectDictCompat, - base.KarborComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - OPTIONAL_FIELDS = ('resources',) - - DEFAULT_EXPECTED_ATTR = ('resources',) - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(), - 'description': fields.StringField(nullable=True), - 'provider_id': fields.UUIDField(), - 'project_id': fields.UUIDField(), - 'status': fields.StringField(nullable=True), - 'resources': fields.ListOfDictOfNullableStringsField(nullable=False), - 'parameters': base.DictOfDictOfStringsField(), - } - - # obj_extra_fields is used to hold properties that are not - # usually part of the model - obj_extra_fields = ['plan_resources'] - - def __init__(self, *args, **kwargs): - super(Plan, self).__init__(*args, **kwargs) - self._orig_resources = {} - self._reset_resources_tracking() - - def obj_reset_changes(self, fields=None): - super(Plan, self).obj_reset_changes(fields) - self._reset_resources_tracking(fields=fields) - - def _reset_resources_tracking(self, fields=None): - if fields is None or 'resources' in fields: - self._orig_resources = (list(self.resources) - if 'resources' in self else []) - - def obj_what_changed(self): - changes = super(Plan, self).obj_what_changed() - if 'resources' in self and self.resources != self._orig_resources: - changes.add('resources') - - return changes - - @staticmethod - def _from_db_object(context, plan, db_plan, expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - for name, field in plan.fields.items(): - if name in Plan.OPTIONAL_FIELDS: - continue - value = db_plan.get(name) - if isinstance(field, fields.IntegerField): - value = value or 0 - if name == "parameters" and value is not None: - value = jsonutils.loads(value) - plan[name] = value - - # Get data from db_plan object that was queried by joined query - # from DB - if 'resources' in expected_attrs: - resources = db_plan.get('resources', []) - resources_list = [] - for resource in resources: - dict_temp = dict() - dict_temp['id'] = resource['resource_id'] - dict_temp['type'] = resource['resource_type'] - dict_temp['name'] = resource['resource_name'] - dict_temp['extra_info'] = ( - resource['resource_extra_info']) - resources_list.append(dict_temp) - plan.resources = resources_list - - plan._context = context - plan.obj_reset_changes() - return plan - - @base.remotable - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - updates = self.karbor_obj_get_changes() - - parameters = updates.pop('parameters', None) - if parameters is not None: - updates['parameters'] = jsonutils.dumps(parameters) - - db_plan = db.plan_create(self._context, updates) - kargs = {} - if hasattr(Plan, 'DEFAULT_EXPECTED_ATTR'): - 
kargs = {'expected_attrs': getattr(Plan, 'DEFAULT_EXPECTED_ATTR')} - self._from_db_object(self._context, self, db_plan, **kargs) - - @base.remotable - def save(self): - updates = self.karbor_obj_get_changes() - if updates: - if 'parameters' in updates: - parameters = updates.pop('parameters', None) - if parameters is not None: - updates['parameters'] = jsonutils.dumps(parameters) - - if 'resources' in updates: - resources = updates.pop('resources', None) - resources_objlist = db.plan_resources_update( - self._context, self.id, resources) - resources_dictlist = [] - for resource_obj in resources_objlist: - resource_dict = {} - resource_dict["plan_id"] = resource_obj.get("plan_id") - resource_dict["id"] = resource_obj.get("resource_id") - resource_dict["type"] = resource_obj.get("resource_type") - resource_dict["name"] = resource_obj.get("resource_name") - resource_dict["extra_info"] = resource_obj.get( - "resource_extra_info") - resources_dictlist.append(resource_dict) - self.resources = resources_dictlist - db.plan_update(self._context, self.id, updates) - self.obj_reset_changes() - - @base.remotable - def destroy(self): - with self.obj_as_admin(): - db.plan_destroy(self._context, self.id) - - -@base.KarborObjectRegistry.register -class PlanList(base.ObjectListBase, base.KarborObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('Plan'), - } - - @base.remotable_classmethod - def get_all(cls, context, marker, limit, sort_keys=None, sort_dirs=None, - filters=None, offset=None): - plans = db.plan_get_all(context, marker, limit, - sort_keys=sort_keys, sort_dirs=sort_dirs, - filters=filters, offset=offset) - expected_attrs = ['resources'] - return base.obj_make_list(context, cls(context), objects.Plan, - plans, expected_attrs=expected_attrs) - - @base.remotable_classmethod - def get_all_by_project(cls, context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, filters=None, - offset=None): - plans = db.plan_get_all_by_project(context, project_id, marker, - limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, offset=offset) - expected_attrs = ['resources'] - return base.obj_make_list(context, cls(context), objects.Plan, - plans, expected_attrs=expected_attrs) diff --git a/karbor/objects/restore.py b/karbor/objects/restore.py deleted file mode 100644 index 2e2e07b6..00000000 --- a/karbor/objects/restore.py +++ /dev/null @@ -1,133 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
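Editor's note: the Plan object above stores its dict-valued ``parameters`` field in a plain text column by serializing it in create()/save() and deserializing it in _from_db_object(). A minimal standalone sketch of that round trip (the to_db/from_db helpers and the plain-dict row are illustrative stand-ins, not karbor APIs)::

    from oslo_serialization import jsonutils

    def to_db(updates):
        # Serialize the dict-valued field into its text column before the
        # DB call, mirroring what Plan.create()/save() do for 'parameters'.
        parameters = updates.pop('parameters', None)
        if parameters is not None:
            updates['parameters'] = jsonutils.dumps(parameters)
        return updates

    def from_db(row):
        # Deserialize on the way back out; a NULL column becomes {}.
        value = row.get('parameters')
        return jsonutils.loads(value) if value else {}

    row = to_db({'name': 'plan1',
                 'parameters': {'OS::Nova::Server': {'backup_name': 'b1'}}})
    assert isinstance(row['parameters'], str)
    assert from_db(row) == {'OS::Nova::Server': {'backup_name': 'b1'}}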
- -from oslo_serialization import jsonutils -from oslo_versionedobjects import fields -import six - -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.objects import base - - -@base.KarborObjectRegistry.register -class Restore(base.KarborPersistentObject, base.KarborObject, - base.KarborObjectDictCompat, - base.KarborComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'id': fields.UUIDField(), - 'project_id': fields.UUIDField(), - 'provider_id': fields.UUIDField(), - 'checkpoint_id': fields.UUIDField(), - 'restore_target': fields.StringField(nullable=True), - 'parameters': base.DictOfDictOfStringsField(nullable=True), - 'status': fields.StringField(nullable=True), - 'resources_status': fields.DictOfStringsField(nullable=True), - 'resources_reason': fields.DictOfStringsField(nullable=True), - } - - json_fields = ('parameters', 'resources_status', 'resources_reason') - - @classmethod - def _from_db_object(cls, context, restore, db_restore): - for name, field in restore.fields.items(): - value = db_restore.get(name) - if isinstance(field, fields.IntegerField): - value = value or 0 - elif isinstance(field, fields.DateTimeField): - value = value or None - if name in cls.json_fields: - value = jsonutils.loads(value) if value else {} - restore[name] = value - - restore._context = context - restore.obj_reset_changes() - return restore - - @classmethod - def _convert_properties_to_db_format(cls, updates): - for attr in cls.json_fields: - value = updates.pop(attr, None) - if value: - updates[attr] = jsonutils.dumps(value) - - @base.remotable - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - updates = self.karbor_obj_get_changes() - self._convert_properties_to_db_format(updates) - db_restore = db.restore_create(self._context, updates) - self._from_db_object(self._context, self, db_restore) - - @base.remotable - def save(self): - updates = self.karbor_obj_get_changes() - self._convert_properties_to_db_format(updates) - if updates: - db.restore_update(self._context, self.id, updates) - self.obj_reset_changes() - - @base.remotable - def destroy(self): - with self.obj_as_admin(): - db.restore_destroy(self._context, self.id) - - @base.remotable - def update_resource_status(self, resource_type, resource_id, status, - reason=None): - key = '{}#{}'.format(resource_type, resource_id) - if not self.obj_attr_is_set('resources_status'): - self.resources_status = {} - self.resources_status[key] = status - self._changed_fields.add('resources_status') - if isinstance(reason, six.string_types): - if not self.obj_attr_is_set('resources_reason'): - self.resources_reason = {} - self.resources_reason[key] = reason - self._changed_fields.add('resources_reason') - - -@base.KarborObjectRegistry.register -class RestoreList(base.ObjectListBase, base.KarborObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('Restore'), - } - - @base.remotable_classmethod - def get_all(cls, context, marker, limit, sort_keys=None, sort_dirs=None, - filters=None, offset=None): - restores = db.restore_get_all(context, marker, limit, - sort_keys=sort_keys, sort_dirs=sort_dirs, - filters=filters, offset=offset) - return base.obj_make_list(context, cls(context), objects.Restore, - restores) - - @base.remotable_classmethod - def get_all_by_project(cls, context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, 
filters=None, - offset=None): - restores = db.restore_get_all_by_project(context, project_id, marker, - limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - return base.obj_make_list(context, cls(context), objects.Restore, - restores) diff --git a/karbor/objects/scheduled_operation.py b/karbor/objects/scheduled_operation.py deleted file mode 100644 index 7e8399cb..00000000 --- a/karbor/objects/scheduled_operation.py +++ /dev/null @@ -1,131 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_serialization import jsonutils -from oslo_versionedobjects import fields - -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.objects import base - - -@base.KarborObjectRegistry.register -class ScheduledOperation(base.KarborPersistentObject, base.KarborObject, - base.KarborObjectDictCompat, - base.KarborComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(), - 'description': fields.StringField(nullable=True), - 'operation_type': fields.StringField(), - 'user_id': fields.UUIDField(), - 'project_id': fields.UUIDField(), - 'trigger_id': fields.UUIDField(), - 'operation_definition': fields.DictOfStringsField(), - 'enabled': fields.BooleanField(default=True), - - 'trigger': fields.ObjectField("Trigger") - } - - INSTANCE_OPTIONAL_JOINED_FIELDS = ['trigger'] - - @staticmethod - def _from_db_object(context, op, db_op, expected_attrs=[]): - special_fields = set(['operation_definition'] + - op.INSTANCE_OPTIONAL_JOINED_FIELDS) - - normal_fields = set(op.fields) - special_fields - for name in normal_fields: - op[name] = db_op.get(name) - - op_definition = db_op['operation_definition'] - if op_definition: - op['operation_definition'] = jsonutils.loads(op_definition) - - if 'trigger' in expected_attrs: - if db_op.get('trigger', None) is None: - op.trigger = None - else: - if not op.obj_attr_is_set('trigger'): - op.trigger = objects.Trigger(context) - op.trigger._from_db_object(context, op.trigger, - db_op['trigger']) - - op._context = context - op.obj_reset_changes() - return op - - @staticmethod - def _convert_operation_definition_to_db_format(updates): - op_definition = updates.pop('operation_definition', None) - if op_definition is not None: - updates['operation_definition'] = jsonutils.dumps(op_definition) - - @base.remotable_classmethod - def get_by_id(cls, context, id, expected_attrs=[]): - columns_to_join = [col for col in expected_attrs - if col in cls.INSTANCE_OPTIONAL_JOINED_FIELDS] - - db_op = db.scheduled_operation_get(context, id, columns_to_join) - if db_op: - return cls._from_db_object(context, cls(), db_op, expected_attrs) - - @base.remotable - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - - updates = self.karbor_obj_get_changes() - self._convert_operation_definition_to_db_format(updates) - db_op = 
db.scheduled_operation_create(self._context, updates) - self._from_db_object(self._context, self, db_op) - - @base.remotable - def save(self): - updates = self.karbor_obj_get_changes() - if updates and self.id: - self._convert_operation_definition_to_db_format(updates) - db.scheduled_operation_update(self._context, - self.id, - updates) - self.obj_reset_changes() - - @base.remotable - def destroy(self): - if self.id: - db.scheduled_operation_delete(self._context, self.id) - - -@base.KarborObjectRegistry.register -class ScheduledOperationList(base.ObjectListBase, base.KarborObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('ScheduledOperation'), - } - - @base.remotable_classmethod - def get_by_filters(cls, context, filters, limit=None, - marker=None, sort_keys=None, sort_dirs=None): - - db_operation_list = db.scheduled_operation_get_all_by_filters_sort( - context, filters, limit=limit, marker=marker, - sort_keys=sort_keys, sort_dirs=sort_dirs) - - return base.obj_make_list(context, cls(context), ScheduledOperation, - db_operation_list) diff --git a/karbor/objects/scheduled_operation_log.py b/karbor/objects/scheduled_operation_log.py deleted file mode 100644 index 1fc944de..00000000 --- a/karbor/objects/scheduled_operation_log.py +++ /dev/null @@ -1,100 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
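Editor's note: ScheduledOperation defers loading its joined ``trigger`` row unless the caller requests it through expected_attrs. A hedged usage sketch (assumes a configured karbor database; ``ctxt`` and ``op_id`` are placeholders)::

    from karbor import objects

    def describe_operation(ctxt, op_id):
        # Without expected_attrs the joined 'trigger' column is not
        # loaded, so the lazy field stays unset on the returned object.
        op = objects.ScheduledOperation.get_by_id(ctxt, op_id)
        assert not op.obj_attr_is_set('trigger')

        # Asking for the join up front populates op.trigger in one query.
        op = objects.ScheduledOperation.get_by_id(
            ctxt, op_id, expected_attrs=['trigger'])
        return (op.name, op.trigger.type if op.trigger else None)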
- -from oslo_versionedobjects import fields - -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor.objects import base - - -@base.KarborObjectRegistry.register -class ScheduledOperationLog(base.KarborPersistentObject, base.KarborObject, - base.KarborObjectDictCompat, - base.KarborComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'id': fields.IntegerField(), - 'operation_id': fields.UUIDField(), - 'expect_start_time': base.DateTimeField(nullable=True), - 'triggered_time': base.DateTimeField(nullable=True), - 'actual_start_time': base.DateTimeField(nullable=True), - 'end_time': base.DateTimeField(nullable=True), - 'state': fields.StringField(), - 'extend_info': fields.StringField(nullable=True), - } - - @staticmethod - def _from_db_object(context, log, db_log): - for name, field in log.fields.items(): - log[name] = db_log.get(name) - - log._context = context - log.obj_reset_changes() - return log - - @base.remotable_classmethod - def get_by_id(cls, context, id): - db_log = db.scheduled_operation_log_get(context, id) - if db_log: - return cls._from_db_object(context, cls(), db_log) - - @base.remotable - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - - updates = self.karbor_obj_get_changes() - db_log = db.scheduled_operation_log_create(self._context, updates) - self._from_db_object(self._context, self, db_log) - - @base.remotable - def save(self): - updates = self.karbor_obj_get_changes() - if updates and self.id is not None: - db.scheduled_operation_log_update(self._context, self.id, updates) - self.obj_reset_changes() - - @base.remotable - def destroy(self): - if self.id is not None: - db.scheduled_operation_log_delete(self._context, self.id) - - @base.remotable_classmethod - def destroy_oldest(cls, context, operation_id, - retained_num, excepted_states=[]): - db.scheduled_operation_log_delete_oldest( - context, operation_id, retained_num, excepted_states) - - -@base.KarborObjectRegistry.register -class ScheduledOperationLogList(base.ObjectListBase, base.KarborObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('ScheduledOperationLog'), - } - - @base.remotable_classmethod - def get_by_filters(cls, context, filters, limit=None, marker=None, - sort_keys=None, sort_dirs=None): - - db_log_list = db.scheduled_operation_log_get_all_by_filters_sort( - context, filters, limit=limit, marker=marker, sort_keys=sort_keys, - sort_dirs=sort_dirs) - - return base.obj_make_list( - context, cls(context), ScheduledOperationLog, db_log_list) diff --git a/karbor/objects/scheduled_operation_state.py b/karbor/objects/scheduled_operation_state.py deleted file mode 100644 index ff0083f9..00000000 --- a/karbor/objects/scheduled_operation_state.py +++ /dev/null @@ -1,121 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
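Editor's note: destroy_oldest() above exists so callers can cap per-operation log history. A hypothetical retention helper built on it (the retention depth and the 'running' state name are assumptions for illustration, not karbor constants)::

    from karbor import objects

    RETAINED_LOGS = 10  # hypothetical retention depth

    def record_run(ctxt, operation_id, log_values):
        log = objects.ScheduledOperationLog(context=ctxt, **log_values)
        log.create()
        # Trim this operation's history to the newest RETAINED_LOGS rows;
        # rows whose state appears in excepted_states are skipped.
        objects.ScheduledOperationLog.destroy_oldest(
            ctxt, operation_id, RETAINED_LOGS,
            excepted_states=['running'])
        return log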
- -from oslo_versionedobjects import fields - -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.objects import base - - -@base.KarborObjectRegistry.register -class ScheduledOperationState(base.KarborPersistentObject, base.KarborObject, - base.KarborObjectDictCompat, - base.KarborComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'id': fields.IntegerField(), - 'operation_id': fields.UUIDField(), - 'service_id': fields.IntegerField(), - 'trust_id': fields.StringField(), - 'state': fields.StringField(), - 'end_time_for_run': base.DateTimeField(nullable=True), - - 'operation': fields.ObjectField("ScheduledOperation") - } - - INSTANCE_OPTIONAL_JOINED_FIELDS = ['operation'] - - @staticmethod - def _from_db_object(context, state, db_state, expected_attrs=[]): - special_fields = set(state.INSTANCE_OPTIONAL_JOINED_FIELDS) - normal_fields = set(state.fields) - special_fields - for name in normal_fields: - state[name] = db_state.get(name) - - if 'operation' in expected_attrs: - if db_state.get('operation', None) is None: - state.operation = None - else: - if not state.obj_attr_is_set('operation'): - state.operation = objects.ScheduledOperation(context) - state.operation._from_db_object(context, state.operation, - db_state['operation']) - - state._context = context - state.obj_reset_changes() - return state - - @base.remotable_classmethod - def get_by_operation_id(cls, context, operation_id, expected_attrs=[]): - columns_to_join = [col for col in expected_attrs - if col in cls.INSTANCE_OPTIONAL_JOINED_FIELDS] - - db_state = db.scheduled_operation_state_get( - context, operation_id, columns_to_join) - if db_state: - return cls._from_db_object(context, cls(), - db_state, columns_to_join) - - @base.remotable - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - - updates = self.karbor_obj_get_changes() - db_state = db.scheduled_operation_state_create(self._context, updates) - self._from_db_object(self._context, self, db_state) - - @base.remotable - def save(self): - updates = self.karbor_obj_get_changes() - if updates and self.operation_id: - db.scheduled_operation_state_update(self._context, - self.operation_id, - updates) - self.obj_reset_changes() - - @base.remotable - def destroy(self): - if self.operation_id: - db.scheduled_operation_state_delete(self._context, - self.operation_id) - - -@base.KarborObjectRegistry.register -class ScheduledOperationStateList(base.ObjectListBase, base.KarborObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('ScheduledOperationState'), - } - - @base.remotable_classmethod - def get_by_filters(cls, context, filters, limit=None, marker=None, - sort_keys=None, sort_dirs=None, columns_to_join=[]): - - option_column = ScheduledOperationState.INSTANCE_OPTIONAL_JOINED_FIELDS - valid_columns = [column for column in columns_to_join - if column in option_column] - - db_state_list = db.scheduled_operation_state_get_all_by_filters_sort( - context, filters, limit=limit, marker=marker, sort_keys=sort_keys, - sort_dirs=sort_dirs, columns_to_join=valid_columns) - - return base.obj_make_list( - context, cls(context), ScheduledOperationState, db_state_list, - expected_attrs=valid_columns) diff --git a/karbor/objects/service.py b/karbor/objects/service.py deleted file mode 100644 index 774f5f69..00000000 --- a/karbor/objects/service.py +++ /dev/null @@ -1,121 +0,0 @@ -# 
Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.objects import base - - -@base.KarborObjectRegistry.register -class Service(base.KarborPersistentObject, base.KarborObject, - base.KarborObjectDictCompat, - base.KarborComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'id': fields.IntegerField(), - 'host': fields.StringField(nullable=True), - 'binary': fields.StringField(nullable=True), - 'topic': fields.StringField(nullable=True), - 'report_count': fields.IntegerField(default=0), - 'disabled': fields.BooleanField(default=False), - 'disabled_reason': fields.StringField(nullable=True), - 'modified_at': base.DateTimeField(nullable=True), - 'rpc_current_version': fields.StringField(nullable=True), - 'rpc_available_version': fields.StringField(nullable=True), - } - - @staticmethod - def _from_db_object(context, service, db_service): - for name, field in service.fields.items(): - value = db_service.get(name) - if isinstance(field, fields.IntegerField): - value = value or 0 - elif isinstance(field, fields.DateTimeField): - value = value or None - service[name] = value - - service._context = context - service.obj_reset_changes() - return service - - @base.remotable_classmethod - def get_by_host_and_topic(cls, context, host, topic): - db_service = db.service_get_by_host_and_topic(context, host, topic) - return cls._from_db_object(context, cls(context), db_service) - - @base.remotable_classmethod - def get_by_args(cls, context, host, binary_key): - db_service = db.service_get_by_args(context, host, binary_key) - return cls._from_db_object(context, cls(context), db_service) - - @base.remotable_classmethod - def get_by_id(cls, context, id): - db_service = db.service_get(context, id) - return cls._from_db_object(context, cls(context), db_service) - - @base.remotable - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - updates = self.karbor_obj_get_changes() - db_service = db.service_create(self._context, updates) - self._from_db_object(self._context, self, db_service) - - @base.remotable - def save(self): - updates = self.karbor_obj_get_changes() - if updates: - db.service_update(self._context, self.id, updates) - self.obj_reset_changes() - - @base.remotable - def destroy(self): - with self.obj_as_admin(): - db.service_destroy(self._context, self.id) - - -@base.KarborObjectRegistry.register -class ServiceList(base.ObjectListBase, base.KarborObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('Service'), - } - child_versions = { - '1.0': '1.0' - } - - @base.remotable_classmethod - def get_all(cls, context, filters=None): - services = db.service_get_all(context, filters) - return base.obj_make_list(context, cls(context), objects.Service, - services) - - @base.remotable_classmethod - def 
get_all_by_args(cls, context, host, binary): - services = db.service_get_all_by_args(context, host, binary) - return base.obj_make_list(context, cls(context), objects.Service, - services) - - @base.remotable_classmethod - def get_all_by_topic(cls, context, topic, disabled=None): - services = db.service_get_all_by_topic(context, topic, - disabled=disabled) - return base.obj_make_list(context, cls(context), objects.Service, - services) diff --git a/karbor/objects/trigger.py b/karbor/objects/trigger.py deleted file mode 100644 index 02909765..00000000 --- a/karbor/objects/trigger.py +++ /dev/null @@ -1,101 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_serialization import jsonutils -from oslo_versionedobjects import fields - -from karbor import db -from karbor.objects import base - - -@base.KarborObjectRegistry.register -class Trigger(base.KarborPersistentObject, base.KarborObject, - base.KarborObjectDictCompat, - base.KarborComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(), - 'project_id': fields.UUIDField(), - 'type': fields.StringField(), - 'properties': fields.DictOfStringsField(), - } - - @staticmethod - def _from_db_object(context, trigger, db_trigger): - special_fields = set(['properties']) - - normal_fields = set(trigger.fields) - special_fields - for name in normal_fields: - trigger[name] = db_trigger.get(name) - - properties = db_trigger['properties'] - if properties: - trigger['properties'] = jsonutils.loads(properties) - - trigger._context = context - trigger.obj_reset_changes() - return trigger - - @staticmethod - def _convert_properties_to_db_format(updates): - properties = updates.pop('properties', None) - if properties is not None: - updates['properties'] = jsonutils.dumps(properties) - - @base.remotable_classmethod - def get_by_id(cls, context, id): - db_trigger = db.trigger_get(context, id) - if db_trigger: - return cls._from_db_object(context, cls(), db_trigger) - - @base.remotable - def create(self): - updates = self.karbor_obj_get_changes() - self._convert_properties_to_db_format(updates) - db_trigger = db.trigger_create(self._context, updates) - self._from_db_object(self._context, self, db_trigger) - - @base.remotable - def save(self): - updates = self.karbor_obj_get_changes() - if updates and self.id: - self._convert_properties_to_db_format(updates) - db.trigger_update(self._context, self.id, updates) - self.obj_reset_changes() - - @base.remotable - def destroy(self): - if self.id: - db.trigger_delete(self._context, self.id) - - -@base.KarborObjectRegistry.register -class TriggerList(base.ObjectListBase, base.KarborObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('Trigger'), - } - - @base.remotable_classmethod - def get_by_filters(cls, context, filters, limit=None, - marker=None, sort_keys=None, sort_dirs=None): - - db_trigger_list = db.trigger_get_all_by_filters_sort( - context, filters, limit=limit, marker=marker, - 
sort_keys=sort_keys, sort_dirs=sort_dirs) - - return base.obj_make_list(context, cls(context), Trigger, - db_trigger_list) diff --git a/karbor/objects/verification.py b/karbor/objects/verification.py deleted file mode 100644 index 292877c6..00000000 --- a/karbor/objects/verification.py +++ /dev/null @@ -1,134 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_serialization import jsonutils -from oslo_versionedobjects import fields -import six - -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.objects import base - - -@base.KarborObjectRegistry.register -class Verification(base.KarborPersistentObject, base.KarborObject, - base.KarborObjectDictCompat, - base.KarborComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'id': fields.UUIDField(), - 'project_id': fields.UUIDField(), - 'provider_id': fields.UUIDField(), - 'checkpoint_id': fields.UUIDField(), - 'parameters': base.DictOfDictOfStringsField(nullable=True), - 'status': fields.StringField(nullable=True), - 'resources_status': fields.DictOfStringsField(nullable=True), - 'resources_reason': fields.DictOfStringsField(nullable=True), - } - - json_fields = ('parameters', 'resources_status', 'resources_reason') - - @classmethod - def _from_db_object(cls, context, verification, db_verification): - for name, field in verification.fields.items(): - value = db_verification.get(name) - if isinstance(field, fields.IntegerField): - value = value or 0 - elif isinstance(field, fields.DateTimeField): - value = value or None - if name in cls.json_fields: - value = jsonutils.loads(value) if value else {} - verification[name] = value - - verification._context = context - verification.obj_reset_changes() - return verification - - @classmethod - def _convert_properties_to_db_format(cls, updates): - for attr in cls.json_fields: - value = updates.pop(attr, None) - if value: - updates[attr] = jsonutils.dumps(value) - - @base.remotable - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - updates = self.karbor_obj_get_changes() - self._convert_properties_to_db_format(updates) - db_verification = db.verification_create(self._context, updates) - self._from_db_object(self._context, self, db_verification) - - @base.remotable - def save(self): - updates = self.karbor_obj_get_changes() - self._convert_properties_to_db_format(updates) - if updates: - db.verification_update(self._context, self.id, updates) - self.obj_reset_changes() - - @base.remotable - def destroy(self): - with self.obj_as_admin(): - db.verification_destroy(self._context, self.id) - - @base.remotable - def update_resource_status(self, resource_type, resource_id, status, - reason=None): - key = '{}#{}'.format(resource_type, resource_id) - if not self.obj_attr_is_set('resources_status'): - self.resources_status = {} - self.resources_status[key] = status - self._changed_fields.add('resources_status') - if 
isinstance(reason, six.string_types): - if not self.obj_attr_is_set('resources_reason'): - self.resources_reason = {} - self.resources_reason[key] = reason - self._changed_fields.add('resources_reason') - - -@base.KarborObjectRegistry.register -class VerificationList(base.ObjectListBase, base.KarborObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('Verification'), - } - - @base.remotable_classmethod - def get_all(cls, context, marker, limit, sort_keys=None, sort_dirs=None, - filters=None, offset=None): - verifications = db.verification_get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, offset=offset) - return base.obj_make_list(context, cls(context), objects.Verification, - verifications) - - @base.remotable_classmethod - def get_all_by_project(cls, context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, filters=None, - offset=None): - verifications = db.verification_get_all_by_project( - context, project_id, marker, - limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - return base.obj_make_list(context, cls(context), objects.Verification, - verifications) diff --git a/karbor/policies/__init__.py b/karbor/policies/__init__.py deleted file mode 100644 index e19f287a..00000000 --- a/karbor/policies/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import itertools - -from karbor.policies import base -from karbor.policies import copies -from karbor.policies import operation_logs -from karbor.policies import plans -from karbor.policies import protectables -from karbor.policies import providers -from karbor.policies import quota_classes -from karbor.policies import quotas -from karbor.policies import restores -from karbor.policies import scheduled_operations -from karbor.policies import services -from karbor.policies import triggers -from karbor.policies import verifications - - -def list_rules(): - return itertools.chain( - base.list_rules(), - plans.list_rules(), - restores.list_rules(), - protectables.list_rules(), - providers.list_rules(), - triggers.list_rules(), - scheduled_operations.list_rules(), - operation_logs.list_rules(), - verifications.list_rules(), - services.list_rules(), - quotas.list_rules(), - quota_classes.list_rules(), - copies.list_rules(), - ) diff --git a/karbor/policies/base.py b/karbor/policies/base.py deleted file mode 100644 index 8a8ba324..00000000 --- a/karbor/policies/base.py +++ /dev/null @@ -1,34 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_policy import policy - -RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner' -RULE_ADMIN_API = 'rule:admin_api' - -rules = [ - policy.RuleDefault('context_is_admin', 'role:admin'), - policy.RuleDefault('admin_or_owner', - 'is_admin:True or (role:admin and ' - 'is_admin_project:True) or project_id:%(project_id)s'), - policy.RuleDefault('default', - 'rule:admin_or_owner'), - policy.RuleDefault('admin_api', - 'is_admin:True or (role:admin and ' - 'is_admin_project:True)'), -] - - -def list_rules(): - return rules diff --git a/karbor/policies/copies.py b/karbor/policies/copies.py deleted file mode 100644 index 1be24c79..00000000 --- a/karbor/policies/copies.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_policy import policy - -from karbor.policies import base - - -CREATE_POLICY = 'copy:create' - -copies_policies = [ - policy.DocumentedRuleDefault( - name=CREATE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Create a copy.', - operations=[ - { - 'method': 'POST', - 'path': '/{project_id}/providers/{provider_id}/' - 'checkpoints/action' - } - ]), -] - - -def list_rules(): - return copies_policies diff --git a/karbor/policies/operation_logs.py b/karbor/policies/operation_logs.py deleted file mode 100644 index 4935b46d..00000000 --- a/karbor/policies/operation_logs.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
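Editor's note: RuleDefault/DocumentedRuleDefault objects like the ones above only take effect once registered on an oslo.policy Enforcer, which karbor/policy.py does later in this diff via register_rules(). A standalone sketch with a simplified check string and a shortened path (the real copy:create path is the checkpoints action URL)::

    from oslo_config import cfg
    from oslo_policy import policy

    cfg.CONF([], project='example')  # minimal config so rules can load
    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_defaults([
        policy.RuleDefault('admin_or_owner',
                           'is_admin:True or project_id:%(project_id)s'),
        policy.DocumentedRuleDefault(
            name='copy:create',
            check_str='rule:admin_or_owner',
            description='Create a copy.',
            operations=[{'method': 'POST', 'path': '/copies'}]),
    ])

    creds = {'project_id': 'p1'}
    # Owner of the target project passes; another project is denied.
    print(enforcer.enforce('copy:create', {'project_id': 'p1'}, creds))
    print(enforcer.enforce('copy:create', {'project_id': 'p2'}, creds))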
- -from oslo_policy import policy - -from karbor.policies import base - - -GET_POLICY = 'operation_log:get' -GET_ALL_POLICY = 'operation_log:list' - -operation_logs_policies = [ - policy.DocumentedRuleDefault( - name=GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get an operation_log.', - operations=[ - { - 'method': 'GET', - 'path': '/operation_logs/{operation_log_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_ALL_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get operation_logs.', - operations=[ - { - 'method': 'GET', - 'path': '/operation_logs' - } - ]), -] - - -def list_rules(): - return operation_logs_policies diff --git a/karbor/policies/plans.py b/karbor/policies/plans.py deleted file mode 100644 index eda3b787..00000000 --- a/karbor/policies/plans.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_policy import policy - -from karbor.policies import base - - -CREATE_POLICY = 'plan:create' -UPDATE_POLICY = 'plan:update' -DELETE_POLICY = 'plan:delete' -GET_POLICY = 'plan:get' -GET_ALL_POLICY = 'plan:get_all' - -plans_policies = [ - policy.DocumentedRuleDefault( - name=CREATE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Create a plan.', - operations=[ - { - 'method': 'POST', - 'path': '/plans' - } - ]), - policy.DocumentedRuleDefault( - name=UPDATE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Update a plan.', - operations=[ - { - 'method': 'PUT', - 'path': '/plans/{plan_id}' - } - ]), - policy.DocumentedRuleDefault( - name=DELETE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Delete a plan.', - operations=[ - { - 'method': 'DELETE', - 'path': '/plans/{plan_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get a plan.', - operations=[ - { - 'method': 'GET', - 'path': '/plans/{plan_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_ALL_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get plans.', - operations=[ - { - 'method': 'GET', - 'path': '/plans' - } - ]), -] - - -def list_rules(): - return plans_policies diff --git a/karbor/policies/protectables.py b/karbor/policies/protectables.py deleted file mode 100644 index 6ad40da9..00000000 --- a/karbor/policies/protectables.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_policy import policy - -from karbor.policies import base - - -GET_POLICY = 'protectable:get' -GET_ALL_POLICY = 'protectable:get_all' -INSTANCES_GET_POLICY = 'protectable:instance_get' -INSTANCES_GET_ALL_POLICY = 'protectable:instance_get_all' - -protectables_policies = [ - policy.DocumentedRuleDefault( - name=GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Show a protectable type.', - operations=[ - { - 'method': 'GET', - 'path': '/protectables/{protectable_type}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_ALL_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='List protectable types.', - operations=[ - { - 'method': 'GET', - 'path': '/protectables' - } - ]), - policy.DocumentedRuleDefault( - name=INSTANCES_GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Show a protectable instance.', - operations=[ - { - 'method': 'GET', - 'path': '/protectables/{protectable_type}/' - 'instances/{resource_id}' - } - ]), - policy.DocumentedRuleDefault( - name=INSTANCES_GET_ALL_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='List protectable instances.', - operations=[ - { - 'method': 'GET', - 'path': '/protectables/{protectable_type}/instances' - } - ]), -] - - -def list_rules(): - return protectables_policies diff --git a/karbor/policies/providers.py b/karbor/policies/providers.py deleted file mode 100644 index eb6f547d..00000000 --- a/karbor/policies/providers.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
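Editor's note: the per-resource policy modules in this diff all repeat the same CRUD shape. Purely as a design observation, the same DocumentedRuleDefault list could be built in a loop, as in the hypothetical sketch below; spelling each rule out the way karbor does keeps every rule name and path greppable, which is why the explicit form was likely preferred::

    from oslo_policy import policy

    ADMIN_OR_OWNER = 'rule:admin_or_owner'

    def crud_rules(resource, collection):
        # (policy suffix, description verb, HTTP method, path template)
        specs = [
            ('create',  'Create', 'POST',   '/%s' % collection),
            ('update',  'Update', 'PUT',    '/%s/{id}' % collection),
            ('delete',  'Delete', 'DELETE', '/%s/{id}' % collection),
            ('get',     'Get',    'GET',    '/%s/{id}' % collection),
            ('get_all', 'List',   'GET',    '/%s' % collection),
        ]
        return [
            policy.DocumentedRuleDefault(
                name='%s:%s' % (resource, suffix),
                check_str=ADMIN_OR_OWNER,
                description='%s %s.' % (verb, resource),
                operations=[{'method': method, 'path': path}])
            for suffix, verb, method, path in specs
        ]

    plans_policies = crud_rules('plan', 'plans')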
- -from oslo_policy import policy - -from karbor.policies import base - - -GET_POLICY = 'provider:get' -GET_ALL_POLICY = 'provider:get_all' -CHECKPOINT_GET_POLICY = 'provider:checkpoint_get' -CHECKPOINT_GET_ALL_POLICY = 'provider:checkpoint_get_all' -CHECKPOINT_CREATE_POLICY = 'provider:checkpoint_create' -CHECKPOINT_DELETE_POLICY = 'provider:checkpoint_delete' -CHECKPOINT_UPDATE_POLICY = 'provider:checkpoint_update' - - -providers_policies = [ - policy.DocumentedRuleDefault( - name=GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Show a protection provider.', - operations=[ - { - 'method': 'GET', - 'path': '/providers/{provider_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_ALL_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='List protection providers.', - operations=[ - { - 'method': 'GET', - 'path': '/providers' - } - ]), - policy.DocumentedRuleDefault( - name=CHECKPOINT_GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Show a checkpoint.', - operations=[ - { - 'method': 'GET', - 'path': '/providers/{provider_id}/checkpoints/{checkpoint_id}' - } - ]), - policy.DocumentedRuleDefault( - name=CHECKPOINT_GET_ALL_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='List checkpoints.', - operations=[ - { - 'method': 'GET', - 'path': '/providers/{provider_id}/checkpoints' - } - ]), - policy.DocumentedRuleDefault( - name=CHECKPOINT_CREATE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Create checkpoint.', - operations=[ - { - 'method': 'POST', - 'path': '/providers/{provider_id}/checkpoints' - } - ]), - policy.DocumentedRuleDefault( - name=CHECKPOINT_DELETE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Delete checkpoint.', - operations=[ - { - 'method': 'DELETE', - 'path': '/providers/{provider_id}/checkpoints/{checkpoint_id}' - } - ]), - policy.DocumentedRuleDefault( - name=CHECKPOINT_UPDATE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Reset checkpoint state.', - operations=[ - { - 'method': 'PUT', - 'path': '/providers/{provider_id}/checkpoints/{checkpoint_id}' - } - ] - ) -] - - -def list_rules(): - return providers_policies diff --git a/karbor/policies/quota_classes.py b/karbor/policies/quota_classes.py deleted file mode 100644 index 0dc1430f..00000000 --- a/karbor/policies/quota_classes.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_policy import policy - -from karbor.policies import base - - -UPDATE_POLICY = 'quota_class:update' -GET_POLICY = 'quota_class:get' - -quota_classes_policies = [ - policy.DocumentedRuleDefault( - name=UPDATE_POLICY, - check_str=base.RULE_ADMIN_API, - description='Update quota classes.', - operations=[ - { - 'method': 'PUT', - 'path': '/quota_classes/{quota_class_name}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get quota classes.', - operations=[ - { - 'method': 'GET', - 'path': '/quota_classes/{quota_class_name}' - } - ]), -] - - -def list_rules(): - return quota_classes_policies diff --git a/karbor/policies/quotas.py b/karbor/policies/quotas.py deleted file mode 100644 index 1561ba09..00000000 --- a/karbor/policies/quotas.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_policy import policy - -from karbor.policies import base - - -UPDATE_POLICY = 'quota:update' -DELETE_POLICY = 'quota:delete' -GET_POLICY = 'quota:get' -GET_DEFAULT_POLICY = 'quota:get_default' - -quotas_policies = [ - policy.DocumentedRuleDefault( - name=UPDATE_POLICY, - check_str=base.RULE_ADMIN_API, - description='Update quotas for a project.', - operations=[ - { - 'method': 'PUT', - 'path': '/quotas/{project_id}' - } - ]), - policy.DocumentedRuleDefault( - name=DELETE_POLICY, - check_str=base.RULE_ADMIN_API, - description='Delete quotas for a project.', - operations=[ - { - 'method': 'DELETE', - 'path': '/quotas/{project_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get quotas for a project.', - operations=[ - { - 'method': 'GET', - 'path': '/quotas/{project_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_DEFAULT_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get default quotas for a project.', - operations=[ - { - 'method': 'GET', - 'path': '/quotas/{project_id}/defaults' - } - ]), -] - - -def list_rules(): - return quotas_policies diff --git a/karbor/policies/restores.py b/karbor/policies/restores.py deleted file mode 100644 index 1d9ec571..00000000 --- a/karbor/policies/restores.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
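Editor's note: quota:update and quota:delete above use RULE_ADMIN_API while the read rules use RULE_ADMIN_OR_OWNER, so a project member can inspect but not change quotas. A standalone check with simplified rule bodies (the real admin_api rule also honors is_admin_project)::

    from oslo_config import cfg
    from oslo_policy import policy

    cfg.CONF([], project='example')
    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_defaults([
        policy.RuleDefault('admin_api', 'is_admin:True'),
        policy.RuleDefault('admin_or_owner',
                           'is_admin:True or project_id:%(project_id)s'),
        policy.RuleDefault('quota:update', 'rule:admin_api'),
        policy.RuleDefault('quota:get', 'rule:admin_or_owner'),
    ])

    owner = {'project_id': 'p1', 'is_admin': False}
    target = {'project_id': 'p1'}
    print(enforcer.enforce('quota:update', target, owner))  # False: admin only
    print(enforcer.enforce('quota:get', target, owner))     # True: owner allowed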
- -from oslo_policy import policy - -from karbor.policies import base - - -CREATE_POLICY = 'restore:create' -UPDATE_POLICY = 'restore:update' -GET_POLICY = 'restore:get' -GET_ALL_POLICY = 'restore:get_all' - -restores_policies = [ - policy.DocumentedRuleDefault( - name=CREATE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Create a restore.', - operations=[ - { - 'method': 'POST', - 'path': '/restores' - } - ]), - policy.DocumentedRuleDefault( - name=UPDATE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Update a restore.', - operations=[ - { - 'method': 'PUT', - 'path': '/restores' - } - ]), - policy.DocumentedRuleDefault( - name=GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get a restore.', - operations=[ - { - 'method': 'GET', - 'path': '/restores/{restore_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_ALL_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get restores.', - operations=[ - { - 'method': 'GET', - 'path': '/restores' - } - ]), -] - - -def list_rules(): - return restores_policies diff --git a/karbor/policies/scheduled_operations.py b/karbor/policies/scheduled_operations.py deleted file mode 100644 index 8574f94f..00000000 --- a/karbor/policies/scheduled_operations.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_policy import policy - -from karbor.policies import base - - -CREATE_POLICY = 'scheduled_operation:create' -DELETE_POLICY = 'scheduled_operation:delete' -GET_POLICY = 'scheduled_operation:get' -GET_ALL_POLICY = 'scheduled_operation:list' - -scheduled_operations_policies = [ - policy.DocumentedRuleDefault( - name=CREATE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Create a scheduled_operation.', - operations=[ - { - 'method': 'POST', - 'path': '/scheduled_operations' - } - ]), - policy.DocumentedRuleDefault( - name=DELETE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Delete a scheduled_operation.', - operations=[ - { - 'method': 'DELETE', - 'path': '/scheduled_operations/{scheduled_operation_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get a scheduled_operation.', - operations=[ - { - 'method': 'GET', - 'path': '/scheduled_operations/{scheduled_operation_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_ALL_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get scheduled_operations.', - operations=[ - { - 'method': 'GET', - 'path': '/scheduled_operations' - } - ]), -] - - -def list_rules(): - return scheduled_operations_policies diff --git a/karbor/policies/services.py b/karbor/policies/services.py deleted file mode 100644 index 077da48a..00000000 --- a/karbor/policies/services.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_policy import policy - -from karbor.policies import base - -GET_ALL_POLICY = 'service:get_all' -UPDATE_POLICY = 'service:update' - -service_policies = [ - policy.DocumentedRuleDefault( - name=GET_ALL_POLICY, - check_str=base.RULE_ADMIN_API, - description='List services.', - operations=[ - { - 'method': 'GET', - 'path': '/os-services' - } - ]), - policy.DocumentedRuleDefault( - name=UPDATE_POLICY, - check_str=base.RULE_ADMIN_API, - description='Update service status', - operations=[ - { - 'method': 'PUT', - 'path': '/os-services/{service_id}' - } - ]), -] - - -def list_rules(): - return service_policies diff --git a/karbor/policies/triggers.py b/karbor/policies/triggers.py deleted file mode 100644 index 8e1ae0c6..00000000 --- a/karbor/policies/triggers.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
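Editor's note: tying the admin-only 'service:update' rule above to the Service object deleted earlier in this diff, a hedged handler sketch (not karbor's actual API code; ``ctxt`` must be a karbor RequestContext and error handling is omitted)::

    from karbor import objects
    from karbor import policy

    def disable_service(ctxt, service_id, reason):
        # Raises PolicyNotAuthorized for non-admin callers.
        policy.enforce_action(ctxt, 'service:update')
        service = objects.Service.get_by_id(ctxt, service_id)
        service.disabled = True
        service.disabled_reason = reason
        service.save()
        return service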
- -from oslo_policy import policy - -from karbor.policies import base - - -CREATE_POLICY = 'trigger:create' -UPDATE_POLICY = 'trigger:update' -DELETE_POLICY = 'trigger:delete' -GET_POLICY = 'trigger:get' -GET_ALL_POLICY = 'trigger:list' - -triggers_policies = [ - policy.DocumentedRuleDefault( - name=CREATE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Create a trigger.', - operations=[ - { - 'method': 'POST', - 'path': '/triggers' - } - ]), - policy.DocumentedRuleDefault( - name=UPDATE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Update a trigger.', - operations=[ - { - 'method': 'PUT', - 'path': '/triggers/{trigger_id}' - } - ]), - policy.DocumentedRuleDefault( - name=DELETE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Delete a trigger.', - operations=[ - { - 'method': 'DELETE', - 'path': '/triggers/{trigger_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get a trigger.', - operations=[ - { - 'method': 'GET', - 'path': '/triggers/{trigger_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_ALL_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get triggers.', - operations=[ - { - 'method': 'GET', - 'path': '/triggers' - } - ]), -] - - -def list_rules(): - return triggers_policies diff --git a/karbor/policies/verifications.py b/karbor/policies/verifications.py deleted file mode 100644 index 4fda0881..00000000 --- a/karbor/policies/verifications.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_policy import policy - -from karbor.policies import base - - -CREATE_POLICY = 'verification:create' -GET_POLICY = 'verification:get' -GET_ALL_POLICY = 'verification:get_all' - -verifications_policies = [ - policy.DocumentedRuleDefault( - name=CREATE_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Create a verification.', - operations=[ - { - 'method': 'POST', - 'path': '/verifications' - } - ]), - policy.DocumentedRuleDefault( - name=GET_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get a verification.', - operations=[ - { - 'method': 'GET', - 'path': '/verifications/{verification_id}' - } - ]), - policy.DocumentedRuleDefault( - name=GET_ALL_POLICY, - check_str=base.RULE_ADMIN_OR_OWNER, - description='Get verifications.', - operations=[ - { - 'method': 'GET', - 'path': '/verifications' - } - ]), -] - - -def list_rules(): - return verifications_policies diff --git a/karbor/policy.py b/karbor/policy.py deleted file mode 100644 index 504dbde2..00000000 --- a/karbor/policy.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.

You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Policy Engine For Karbor""" - -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_policy import opts as policy_opts -from oslo_policy import policy -from oslo_utils import excutils - -from karbor import exception -from karbor import policies - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -policy_opts.set_defaults(cfg.CONF, 'policy.json') - -_ENFORCER = None - - -def reset(): - global _ENFORCER - if _ENFORCER: - _ENFORCER.clear() - _ENFORCER = None - - -def init(policy_file=None, rules=None, default_rule=None, use_conf=True): - """Init an Enforcer class. - - :param policy_file: Custom policy file to use, if none is specified, - `CONF.policy_file` will be used. - :param rules: Default dictionary / Rules to use. It will be - considered just in the first instantiation. - :param default_rule: Default rule to use, CONF.default_rule will - be used if none is specified. - :param use_conf: Whether to load rules from config file. - """ - - global _ENFORCER - if not _ENFORCER: - _ENFORCER = policy.Enforcer(CONF, - policy_file=policy_file, - rules=rules, - default_rule=default_rule, - use_conf=use_conf) - register_rules(_ENFORCER) - _ENFORCER.load_rules() - - -def enforce_action(context, action): - """Checks that the action can be done by the given context. - - Applies a check to ensure the context's project_id and user_id can be - applied to the given action using the policy enforcement api. - """ - - return enforce(context, action, {'project_id': context.project_id, - 'user_id': context.user_id}) - - -def enforce(context, action, target): - """Verifies that the action is valid on the target in this context. - - :param context: karbor context - :param action: string representing the action to be checked - this should be colon separated for clarity. - i.e. ``compute:create_instance``, - ``compute:attach_volume``, - ``volume:attach_volume`` - - :param target: dictionary representing the object of the action - for object creation this should be a dictionary representing the - location of the object e.g. ``{'project_id': context.project_id}`` - - :raises PolicyNotAuthorized: if verification fails. - - """ - init() - - return _ENFORCER.enforce(action, - target, - context.to_policy_values(), - do_raise=True, - exc=exception.PolicyNotAuthorized, - action=action) - - -def set_rules(rules, overwrite=True, use_conf=False): - """Set rules based on the provided dict of rules. - - :param rules: New rules to use. It should be an instance of dict. - :param overwrite: Whether to overwrite current rules or update them - with the new rules. - :param use_conf: Whether to reload rules from config file. - """ - - init(use_conf=False) - _ENFORCER.set_rules(rules, overwrite, use_conf) - - -def get_rules(): - if _ENFORCER: - return _ENFORCER.rules - - -def register_rules(enforcer): - enforcer.register_defaults(policies.list_rules()) - - -def get_enforcer(): - # This method is for use by oslopolicy CLI scripts. 
Those scripts need the - # 'output-file' and 'namespace' options, but having those in sys.argv means - # loading the Karbor config options will fail as those are not expected to - # be present. So we pass in an arg list with those stripped out. - conf_args = [] - # Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:] - i = 1 - while i < len(sys.argv): - if sys.argv[i].strip('-') in ['namespace', 'output-file']: - i += 2 - continue - conf_args.append(sys.argv[i]) - i += 1 - - cfg.CONF(conf_args, project='karbor') - init() - return _ENFORCER - - -def authorize(context, action, target, do_raise=True, exc=None): - """Verifies that the action is valid on the target in this context. - - :param context: karbor context - :param action: string representing the action to be checked - this should be colon separated for clarity. - i.e. ``compute:create_instance``, - ``plan:create``, - ``plan:get`` - :param target: dictionary representing the object of the action - for object creation this should be a dictionary representing the - location of the object e.g. ``{'project_id': context.project_id}`` - :param do_raise: if True (the default), raises PolicyNotAuthorized; - if False, returns False - :param exc: Class of the exception to raise if the check fails. - Any remaining arguments passed to :meth:`authorize` (both - positional and keyword arguments) will be passed to - the exception class. If not specified, - :class:`PolicyNotAuthorized` will be used. - - :raises karbor.exception.PolicyNotAuthorized: if verification fails - and do_raise is True. Or if 'exc' is specified it will raise an - exception of that type. - - :return: returns a non-False value (not necessarily "True") if - authorized, and the exact value False if not authorized and - do_raise is False. - """ - init() - credentials = context.to_policy_values() - if not exc: - exc = exception.PolicyNotAuthorized - try: - result = _ENFORCER.authorize(action, target, credentials, - do_raise=do_raise, exc=exc, action=action) - except policy.PolicyNotRegistered: - with excutils.save_and_reraise_exception(): - LOG.exception('Policy not registered') - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Policy check for %(action)s failed with credentials ' - '%(credentials)s', - {'action': action, 'credentials': credentials}) - return result - - -def check_is_admin(context): - """Whether or not user is admin according to policy setting. - - """ - init() - # the target is user-self - credentials = context.to_policy_values() - target = credentials - return _ENFORCER.authorize('context_is_admin', target, credentials) diff --git a/karbor/quota.py b/karbor/quota.py deleted file mode 100644 index a88614d8..00000000 --- a/karbor/quota.py +++ /dev/null @@ -1,822 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
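As a side note, the enforcement helpers defined in karbor/policy.py above were typically driven like this (a minimal sketch; the context field values and the soft-check pattern are illustrative assumptions, not taken from this tree):

    from karbor import context
    from karbor import policy

    # Placeholder context values, for illustration only.
    ctxt = context.RequestContext(user_id='demo-user',
                                  project_id='demo-project')

    # enforce_action() builds the target from the context itself and
    # raises PolicyNotAuthorized if the registered rule denies access.
    policy.enforce_action(ctxt, 'trigger:create')

    # authorize() also supports a soft check that returns False instead
    # of raising.
    allowed = policy.authorize(ctxt, 'plan:get',
                               {'project_id': ctxt.project_id},
                               do_raise=False)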
- -"""Quotas for shares.""" - -import datetime - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils -from oslo_utils import timeutils -import six - -from karbor import db -from karbor import exception -from karbor.i18n import _ - -LOG = logging.getLogger(__name__) - -quota_opts = [ - cfg.IntOpt('quota_plans', - default=50, - help='The number of volume backups allowed per project'), - cfg.IntOpt('quota_checkpoints', - default=-1, - help='The number of checkpoints allowed per project'), - cfg.IntOpt('reservation_expire', - default=86400, - help='number of seconds until a reservation expires'), - cfg.IntOpt('until_refresh', - default=0, - help='count of reservations until usage is refreshed'), - cfg.IntOpt('max_age', - default=0, - help='number of seconds between subsequent usage refreshes'), - cfg.StrOpt('quota_driver', - default='karbor.quota.DbQuotaDriver', - help='default driver to use for quota checks'), ] - -CONF = cfg.CONF -CONF.register_opts(quota_opts) - - -class DbQuotaDriver(object): - """Driver to perform necessary checks to enforce quotas and obtain - - quota information. The default driver utilizes the local - database. - """ - - def get_by_project(self, context, project_id, resource): - """Get a specific quota by project.""" - - return db.quota_get(context, project_id, resource) - - def get_by_class(self, context, quota_class, resource): - """Get a specific quota by quota class.""" - - return db.quota_class_get(context, quota_class, resource) - - def get_defaults(self, context, resources): - """Given a list of resources, retrieve the default quotas. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. - """ - - quotas = {} - for resource in resources.values(): - quotas[resource.name] = resource.default - - return quotas - - def get_class_quotas(self, context, resources, quota_class, - defaults=True): - """Given a list of resources, retrieve the quotas for the given quota class. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. - :param quota_class: The name of the quota class to return - quotas for. - :param defaults: If True, the default value will be reported - if there is no specific value for the - resource. - """ - - quotas = {} - class_quotas = db.quota_class_get_all_by_name(context, quota_class) - for resource in resources.values(): - if defaults or resource.name in class_quotas: - quotas[resource.name] = class_quotas.get(resource.name, - resource.default) - - return quotas - - def get_project_quotas(self, context, resources, project_id, - quota_class=None, defaults=True, - usages=True): - """Given a list of resources, retrieve the quotas for the given project. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. - :param project_id: The ID of the project to return quotas for. - :param quota_class: If project_id != context.project_id, the - quota class cannot be determined. This - parameter allows it to be specified. It - will be ignored if project_id == - context.project_id. - :param defaults: If True, the quota class value (or the - default value, if there is no value from the - quota class) will be reported if there is no - specific value for the resource. - :param usages: If True, the current in_use and reserved counts - will also be returned. 
- """ - - quotas = {} - project_quotas = db.quota_get_all_by_project(context, project_id) - if usages: - project_usages = db.quota_usage_get_all_by_project(context, - project_id) - - # Get the quotas for the appropriate class. If the project ID - # matches the one in the context, we use the quota_class from - # the context, otherwise, we use the provided quota_class (if - # any) - if project_id == context.project_id: - quota_class = context.quota_class - if quota_class: - class_quotas = db.quota_class_get_all_by_name(context, quota_class) - else: - class_quotas = {} - - for resource in resources.values(): - # Omit default/quota class values - if not defaults and resource.name not in project_quotas: - continue - - quotas[resource.name] = dict( - limit=project_quotas.get(resource.name, - class_quotas.get(resource.name, - resource.default)), ) - - # Include usages if desired. This is optional because one - # internal consumer of this interface wants to access the - # usages directly from inside a transaction. - if usages: - usage = project_usages.get(resource.name, {}) - quotas[resource.name].update( - in_use=usage.get('in_use', 0), - reserved=usage.get('reserved', 0), ) - - return quotas - - def _get_quotas(self, context, resources, keys, has_sync, project_id=None): - """A helper method which retrieves the quotas for the specific - - resources identified by keys, and which apply to the current - context. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. - :param keys: A list of the desired quotas to retrieve. - :param has_sync: If True, indicates that the resource must - have a sync attribute; if False, indicates - that the resource must NOT have a sync - attribute. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - # Filter resources - if has_sync: - sync_filt = lambda x: hasattr(x, 'sync') # noqa: E731 - else: - sync_filt = lambda x: not hasattr(x, 'sync') # noqa: E731 - desired = set(keys) - sub_resources = dict((k, v) for k, v in resources.items() - if k in desired and sync_filt(v)) - - # Make sure we accounted for all of them... - if len(keys) != len(sub_resources): - unknown = desired - set(sub_resources.keys()) - raise exception.QuotaResourceUnknown(unknown=sorted(unknown)) - - # Grab and return the quotas (without usages) - quotas = self.get_project_quotas(context, sub_resources, - project_id, - context.quota_class, usages=False) - - return dict((k, v['limit']) for k, v in quotas.items()) - - def limit_check(self, context, resources, values, project_id=None): - """Check simple quota limits. - - For limits--those quotas for which there is no usage - synchronization function--this method checks that a set of - proposed values are permitted by the limit restriction. - - This method will raise a QuotaResourceUnknown exception if a - given resource is unknown or if it is not a simple limit - resource. - - If any of the proposed values is over the defined quota, an - OverQuota exception will be raised with the sorted list of the - resources which are too high. Otherwise, the method returns - nothing. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. - :param values: A dictionary of the values to check against the - quota. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. 
- """ - - # Ensure no value is less than zero - unders = [key for key, val in values.items() if val < 0] - if unders: - raise exception.InvalidQuotaValue(unders=sorted(unders)) - - # If project_id is None, then we use the project_id in context - if project_id is None: - project_id = context.project_id - - # Get the applicable quotas - quotas = self._get_quotas(context, resources, values.keys(), - has_sync=False, project_id=project_id) - # Check the quotas and construct a list of the resources that - # would be put over limit by the desired values - overs = [key for key, val in values.items() - if quotas[key] >= 0 and quotas[key] < val] - if overs: - raise exception.OverQuota(overs=sorted(overs), quotas=quotas, - usages={}) - - def reserve(self, context, resources, deltas, expire=None, - project_id=None): - """Check quotas and reserve resources. - - For counting quotas--those quotas for which there is a usage - synchronization function--this method checks quotas against - current usage and the desired deltas. - - This method will raise a QuotaResourceUnknown exception if a - given resource is unknown or if it does not have a usage - synchronization function. - - If any of the proposed values is over the defined quota, an - OverQuota exception will be raised with the sorted list of the - resources which are too high. Otherwise, the method returns a - list of reservation UUIDs which were created. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. - :param deltas: A dictionary of the proposed delta changes. - :param expire: An optional parameter specifying an expiration - time for the reservations. If it is a simple - number, it is interpreted as a number of - seconds and added to the current time; if it is - a datetime.timedelta object, it will also be - added to the current time. A datetime.datetime - object will be interpreted as the absolute - expiration time. If None is specified, the - default expiration time set by - --default-reservation-expire will be used (this - value will be treated as a number of seconds). - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - # Set up the reservation expiration - if expire is None: - expire = CONF.reservation_expire - if isinstance(expire, six.integer_types): - expire = datetime.timedelta(seconds=expire) - if isinstance(expire, datetime.timedelta): - expire = timeutils.utcnow() + expire - if not isinstance(expire, datetime.datetime): - raise exception.InvalidReservationExpiration(expire=expire) - - # If project_id is None, then we use the project_id in context - if project_id is None: - project_id = context.project_id - - # Get the applicable quotas. - # NOTE(Vek): We're not worried about races at this point. - # Yes, the admin may be in the process of reducing - # quotas, but that's a pretty rare thing. - quotas = self._get_quotas(context, resources, deltas.keys(), - has_sync=True, project_id=project_id) - - # NOTE(Vek): Most of the work here has to be done in the DB - # API, because we have to do it in a transaction, - # which means access to the session. Since the - # session isn't available outside the DBAPI, we - # have to do the work there. - return db.quota_reserve(context, resources, quotas, deltas, expire, - CONF.until_refresh, CONF.max_age, - project_id=project_id) - - def commit(self, context, reservations, project_id=None): - """Commit reservations. 
- - :param context: The request context, for access checks. - :param reservations: A list of the reservation UUIDs, as - returned by the reserve() method. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - # If project_id is None, then we use the project_id in context - if project_id is None: - project_id = context.project_id - - db.reservation_commit(context, reservations, project_id=project_id) - - def rollback(self, context, reservations, project_id=None): - """Roll back reservations. - - :param context: The request context, for access checks. - :param reservations: A list of the reservation UUIDs, as - returned by the reserve() method. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - # If project_id is None, then we use the project_id in context - if project_id is None: - project_id = context.project_id - - db.reservation_rollback(context, reservations, project_id=project_id) - - def destroy_all_by_project(self, context, project_id): - """Destroy all quotas, usages, and reservations associated with a project. - - :param context: The request context, for access checks. - :param project_id: The ID of the project being deleted. - """ - - db.quota_destroy_all_by_project(context, project_id) - - def expire(self, context): - """Expire reservations. - - Explores all currently existing reservations and rolls back - any that have expired. - - :param context: The request context, for access checks. - """ - - db.reservation_expire(context) - - -class BaseResource(object): - """Describe a single resource for quota checking.""" - - def __init__(self, name, flag=None): - """Initializes a Resource. - - :param name: The name of the resource, i.e., "shares". - :param flag: The name of the flag or configuration option - which specifies the default value of the quota - for this resource. - """ - - self.name = name - self.flag = flag - - def quota(self, driver, context, **kwargs): - """Given a driver and context, obtain the quota for this resource. - - :param driver: A quota driver. - :param context: The request context. - :param project_id: The project to obtain the quota value for. - If not provided, it is taken from the - context. If it is given as None, no - project-specific quota will be searched - for. - :param quota_class: The quota class corresponding to the - project, or for which the quota is to be - looked up. If not provided, it is taken - from the context. If it is given as None, - no quota class-specific quota will be - searched for. Note that the quota class - defaults to the value in the context, - which may not correspond to the project if - project_id is not the same as the one in - the context. 
- """ - - # Get the project ID - project_id = kwargs.get('project_id', context.project_id) - - # Ditto for the quota class - quota_class = kwargs.get('quota_class', context.quota_class) - - # Look up the quota for the project - if project_id: - try: - return driver.get_by_project(context, project_id, self.name) - except exception.ProjectQuotaNotFound: - pass - - # Try for the quota class - if quota_class: - try: - return driver.get_by_class(context, quota_class, self.name) - except exception.QuotaClassNotFound: - pass - - # OK, return the default - return self.default - - @property - def default(self): - """Return the default value of the quota.""" - - return CONF[self.flag] if self.flag else -1 - - -class ReservableResource(BaseResource): - """Describe a reservable resource.""" - - def __init__(self, name, sync, flag=None): - """Initializes a ReservableResource. - - Reservable resources are those resources which directly - correspond to objects in the database, i.e., shares, gigabytes, - etc. A ReservableResource must be constructed with a usage - synchronization function, which will be called to determine the - current counts of one or more resources. - - The usage synchronization function will be passed three - arguments: an admin context, the project ID, and an opaque - session object, which should in turn be passed to the - underlying database function. Synchronization functions - should return a dictionary mapping resource names to the - current in_use count for those resources; more than one - resource and resource count may be returned. Note that - synchronization functions may be associated with more than one - ReservableResource. - - :param name: The name of the resource, i.e., "shares". - :param sync: A callable which returns a dictionary to - resynchronize the in_use count for one or more - resources, as described above. - :param flag: The name of the flag or configuration option - which specifies the default value of the quota - for this resource. - """ - - super(ReservableResource, self).__init__(name, flag=flag) - self.sync = sync - - -class AbsoluteResource(BaseResource): - """Describe a non-reservable resource.""" - - pass - - -class CountableResource(AbsoluteResource): - """Describe a resource where the counts aren't based solely on the - - project ID. - """ - - def __init__(self, name, count, flag=None): - """Initializes a CountableResource. - - Countable resources are those resources which directly - correspond to objects in the database, i.e., shares, gigabytes, - etc., but for which a count by project ID is inappropriate. A - CountableResource must be constructed with a counting - function, which will be called to determine the current counts - of the resource. - - The counting function will be passed the context, along with - the extra positional and keyword arguments that are passed to - Quota.count(). It should return an integer specifying the - count. - - Note that this counting is not performed in a transaction-safe - manner. This resource class is a temporary measure to provide - required functionality, until a better approach to solving - this problem can be evolved. - - :param name: The name of the resource, i.e., "shares". - :param count: A callable which returns the count of the - resource. The arguments passed are as described - above. - :param flag: The name of the flag or configuration option - which specifies the default value of the quota - for this resource. 
- """ - - super(CountableResource, self).__init__(name, flag=flag) - self.count = count - - -class QuotaEngine(object): - """Represent the set of recognized quotas.""" - - def __init__(self, quota_driver_class=None): - """Initialize a Quota object.""" - - if not quota_driver_class: - quota_driver_class = CONF.quota_driver - - if isinstance(quota_driver_class, six.string_types): - quota_driver_class = importutils.import_object(quota_driver_class) - - self._resources = {} - self._driver = quota_driver_class - - def __contains__(self, resource): - return resource in self._resources - - def register_resource(self, resource): - """Register a resource.""" - - self._resources[resource.name] = resource - - def register_resources(self, resources): - """Register a list of resources.""" - - for resource in resources: - self.register_resource(resource) - - def get_by_project(self, context, project_id, resource): - """Get a specific quota by project.""" - - return self._driver.get_by_project(context, project_id, resource) - - def get_by_class(self, context, quota_class, resource): - """Get a specific quota by quota class.""" - - return self._driver.get_by_class(context, quota_class, resource) - - def get_defaults(self, context): - """Retrieve the default quotas. - - :param context: The request context, for access checks. - """ - - return self._driver.get_defaults(context, self._resources) - - def get_class_quotas(self, context, quota_class, defaults=True): - """Retrieve the quotas for the given quota class. - - :param context: The request context, for access checks. - :param quota_class: The name of the quota class to return - quotas for. - :param defaults: If True, the default value will be reported - if there is no specific value for the - resource. - """ - - return self._driver.get_class_quotas(context, self._resources, - quota_class, defaults=defaults) - - def get_project_quotas(self, context, project_id, quota_class=None, - defaults=True, usages=True): - """Retrieve the quotas for the given project. - - :param context: The request context, for access checks. - :param project_id: The ID of the project to return quotas for. - :param quota_class: If project_id != context.project_id, the - quota class cannot be determined. This - parameter allows it to be specified. - :param defaults: If True, the quota class value (or the - default value, if there is no value from the - quota class) will be reported if there is no - specific value for the resource. - :param usages: If True, the current in_use and reserved counts - will also be returned. - """ - - return self._driver.get_project_quotas(context, self._resources, - project_id, - quota_class=quota_class, - defaults=defaults, - usages=usages) - - def count(self, context, resource, *args, **kwargs): - """Count a resource. - - For countable resources, invokes the count() function and - returns its result. Arguments following the context and - resource are passed directly to the count function declared by - the resource. - - :param context: The request context, for access checks. - :param resource: The name of the resource, as a string. - """ - - # Get the resource - res = self._resources.get(resource) - if not res or not hasattr(res, 'count'): - raise exception.QuotaResourceUnknown(unknown=[resource]) - - return res.count(context, *args, **kwargs) - - def limit_check(self, context, project_id=None, **values): - """Check simple quota limits. 
- - For limits--those quotas for which there is no usage - synchronization function--this method checks that a set of - proposed values are permitted by the limit restriction. The - values to check are given as keyword arguments, where the key - identifies the specific quota limit to check, and the value is - the proposed value. - - This method will raise a QuotaResourceUnknown exception if a - given resource is unknown or if it is not a simple limit - resource. - - If any of the proposed values is over the defined quota, an - OverQuota exception will be raised with the sorted list of the - resources which are too high. Otherwise, the method returns - nothing. - - :param context: The request context, for access checks. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - return self._driver.limit_check(context, self._resources, values, - project_id=project_id) - - def reserve(self, context, expire=None, project_id=None, **deltas): - """Check quotas and reserve resources. - - For counting quotas--those quotas for which there is a usage - synchronization function--this method checks quotas against - current usage and the desired deltas. The deltas are given as - keyword arguments, and current usage and other reservations - are factored into the quota check. - - This method will raise a QuotaResourceUnknown exception if a - given resource is unknown or if it does not have a usage - synchronization function. - - If any of the proposed values is over the defined quota, an - OverQuota exception will be raised with the sorted list of the - resources which are too high. Otherwise, the method returns a - list of reservation UUIDs which were created. - - :param context: The request context, for access checks. - :param expire: An optional parameter specifying an expiration - time for the reservations. If it is a simple - number, it is interpreted as a number of - seconds and added to the current time; if it is - a datetime.timedelta object, it will also be - added to the current time. A datetime.datetime - object will be interpreted as the absolute - expiration time. If None is specified, the - default expiration time set by - --default-reservation-expire will be used (this - value will be treated as a number of seconds). - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - reservations = self._driver.reserve(context, self._resources, deltas, - expire=expire, - project_id=project_id) - - LOG.debug(_("Created reservations %(reservations)s") % - {"reservations": reservations}) - - return reservations - - def commit(self, context, reservations, project_id=None): - """Commit reservations. - - :param context: The request context, for access checks. - :param reservations: A list of the reservation UUIDs, as - returned by the reserve() method. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - try: - self._driver.commit(context, reservations, project_id=project_id) - except Exception: - # NOTE(Vek): Ignoring exceptions here is safe, because the - # usage resynchronization and the reservation expiration - # mechanisms will resolve the issue. The exception is - # logged, however, because this is less than optimal. 
- LOG.exception(_("Failed to commit reservations " - "%(reservations)s") % - {"reservations": reservations}) - - def rollback(self, context, reservations, project_id=None): - """Roll back reservations. - - :param context: The request context, for access checks. - :param reservations: A list of the reservation UUIDs, as - returned by the reserve() method. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - try: - self._driver.rollback(context, reservations, project_id=project_id) - except Exception: - # NOTE(Vek): Ignoring exceptions here is safe, because the - # usage resynchronization and the reservation expiration - # mechanisms will resolve the issue. The exception is - # logged, however, because this is less than optimal. - LOG.exception(_("Failed to roll back reservations " - "%(reservations)s") % - {"reservations": reservations}) - - def destroy_all_by_project(self, context, project_id): - """Destroy all quotas, usages, and reservations associated with a - - project. - - :param context: The request context, for access checks. - :param project_id: The ID of the project being deleted. - """ - - self._driver.destroy_all_by_project(context, project_id) - - def expire(self, context): - """Expire reservations. - - Explores all currently existing reservations and rolls back - any that have expired. - - :param context: The request context, for access checks. - """ - - self._driver.expire(context) - - @property - def resources(self): - return sorted(self._resources.keys()) - - -QUOTAS = QuotaEngine() - - -resources = [ - ReservableResource('plans', None, - 'quota_plans'), - ReservableResource('checkpoints', None, - 'quota_checkpoints'), -] - - -QUOTAS.register_resources(resources) - - -OVER_QUOTA_RESOURCE_EXCEPTIONS = { - 'plans': exception.PlanLimitExceeded, - 'checkpoints': exception.CheckpointLimitExceeded -} - - -def process_reserve_over_quota(context, over_quota_exception, - resource, size=None): - """Handle OverQuota exception. - - Analyze OverQuota exception, and raise new exception related to - resource type. If there are unexpected items in overs, - UnexpectedOverQuota is raised. - - :param context: security context - :param over_quota_exception: OverQuota exception - :param resource: can be backups, snapshots, and volumes - :param size: requested size in reservation - """ - def _consumed(name): - return usages[name]['reserved'] + usages[name]['in_use'] - - overs = over_quota_exception.kwargs['overs'] - usages = over_quota_exception.kwargs['usages'] - quotas = over_quota_exception.kwargs['quotas'] - invalid_overs = [] - - for over in overs: - if (resource in OVER_QUOTA_RESOURCE_EXCEPTIONS.keys() and - resource in over): - msg = ("Quota exceeded for %(s_pid)s, tried to create " - "%(s_resource)s (%(d_consumed)d %(s_resource)ss " - "already consumed).") - LOG.warning(msg, {'s_pid': context.project_id, - 'd_consumed': _consumed(over), - 's_resource': resource[:-1]}) - raise OVER_QUOTA_RESOURCE_EXCEPTIONS[resource]( - allowed=quotas[over], - name=over) - invalid_overs.append(over) - - if invalid_overs: - raise exception.UnexpectedOverQuota(name=', '.join(invalid_overs)) diff --git a/karbor/resource.py b/karbor/resource.py deleted file mode 100644 index 90c8c740..00000000 --- a/karbor/resource.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class Resource(object): - __slots__ = ('type', 'id', 'name', 'extra_info') - - def __init__(self, type, id, name, extra_info=None): - self.type = type - self.id = id - self.name = name - self.extra_info = extra_info - - def __setattr__(self, key, value): - try: - getattr(self, key) - except AttributeError: - pass - else: - raise AttributeError() - - return super(Resource, self).__setattr__(key, value) - - def __hash__(self): - return hash(self.key) - - def __eq__(self, other): - return self.key == other.key - - def to_dict(self): - return {item: getattr(self, item) for item in self.__slots__} - - @property - def key(self): - return (self.type, self.id, self.name) diff --git a/karbor/rpc.py b/karbor/rpc.py deleted file mode 100644 index 37b6d7aa..00000000 --- a/karbor/rpc.py +++ /dev/null @@ -1,144 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__all__ = [ - 'init', - 'cleanup', - 'set_defaults', - 'add_extra_exmods', - 'clear_extra_exmods', - 'get_allowed_exmods', - 'RequestContextSerializer', - 'get_client', - 'get_server', - 'get_notifier', -] - -from oslo_config import cfg -import oslo_messaging as messaging -from oslo_messaging.rpc import dispatcher - -import karbor.context -import karbor.exception -from karbor import utils - -CONF = cfg.CONF -TRANSPORT = None -NOTIFICATION_TRANSPORT = None -NOTIFIER = None - -ALLOWED_EXMODS = [ - karbor.exception.__name__, -] -EXTRA_EXMODS = [] - - -def init(conf): - - global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER - exmods = get_allowed_exmods() - TRANSPORT = messaging.get_rpc_transport(conf, - allowed_remote_exmods=exmods) - NOTIFICATION_TRANSPORT = messaging.get_notification_transport( - conf, - allowed_remote_exmods=exmods) - - # get_notification_transport has loaded oslo_messaging_notifications config - # group, so we can now check if notifications are actually enabled. 
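# When notifications are disabled, NOTIFIER is bound to utils.DO_NOTHING
# (a no-op stand-in) instead of being left as None, so initialized() and
# get_notifier() keep working without callers branching on None.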
- if utils.notifications_enabled(conf): - json_serializer = messaging.JsonPayloadSerializer() - serializer = RequestContextSerializer(json_serializer) - NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, - serializer=serializer) - else: - NOTIFIER = utils.DO_NOTHING - - -def initialized(): - return None not in [TRANSPORT, NOTIFIER] - - -def cleanup(): - global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER - assert TRANSPORT is not None - - TRANSPORT.cleanup() - NOTIFICATION_TRANSPORT.cleanup() - TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None - - -def set_defaults(control_exchange): - messaging.set_transport_defaults(control_exchange) - - -def add_extra_exmods(*args): - EXTRA_EXMODS.extend(args) - - -def clear_extra_exmods(): - del EXTRA_EXMODS[:] - - -def get_allowed_exmods(): - return ALLOWED_EXMODS + EXTRA_EXMODS - - -class RequestContextSerializer(messaging.Serializer): - - def __init__(self, base): - self._base = base - super(RequestContextSerializer, self).__init__() - - def serialize_entity(self, context, entity): - if not self._base: - return entity - return self._base.serialize_entity(context, entity) - - def deserialize_entity(self, context, entity): - if not self._base: - return entity - return self._base.deserialize_entity(context, entity) - - def serialize_context(self, context): - _context = context.to_dict() - return _context - - def deserialize_context(self, context): - return karbor.context.RequestContext.from_dict(context) - - -def get_client(target, version_cap=None, serializer=None): - assert TRANSPORT is not None - serializer = RequestContextSerializer(serializer) - return messaging.RPCClient(TRANSPORT, - target, - version_cap=version_cap, - serializer=serializer) - - -def get_server(target, endpoints, serializer=None): - assert TRANSPORT is not None - serializer = RequestContextSerializer(serializer) - access_policy = dispatcher.DefaultRPCAccessPolicy - return messaging.get_rpc_server(TRANSPORT, - target, - endpoints, - executor='eventlet', - serializer=serializer, - access_policy=access_policy) - - -def get_notifier(service=None, host=None, publisher_id=None): - assert NOTIFIER is not None - if not publisher_id: - publisher_id = "%s.%s" % (service, host or CONF.host) - return NOTIFIER.prepare(publisher_id=publisher_id) diff --git a/karbor/service.py b/karbor/service.py deleted file mode 100644 index 262f018d..00000000 --- a/karbor/service.py +++ /dev/null @@ -1,449 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
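For orientation, a rough sketch of how a service would consume the RPC plumbing above (the topic, version, and method names are placeholders, not taken from this tree):

    import oslo_messaging as messaging
    from oslo_config import cfg

    from karbor import rpc

    # One-time wiring of the RPC and notification transports.
    rpc.init(cfg.CONF)

    # get_client() wraps the serializer so request contexts survive the
    # trip through oslo.messaging.
    target = messaging.Target(topic='karbor-operationengine')
    client = rpc.get_client(target, version_cap='1.0')

    # A call site would then prepare and cast, e.g.:
    # cctxt = client.prepare(version='1.0')
    # cctxt.cast(ctxt, 'create_trigger', trigger=trigger)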
- -"""Generic Node base class for all workers that run on hosts.""" - - -import inspect -import os -import random - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_log import log as logging -import oslo_messaging as messaging -from oslo_service import loopingcall -from oslo_service import service -from oslo_service import wsgi -from oslo_utils import importutils - -from karbor import context -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor.objects import base as objects_base -from karbor import rpc -from karbor import version - -LOG = logging.getLogger(__name__) - -service_opts = [ - cfg.IntOpt('report_interval', - default=10, - help='Interval, in seconds, between nodes reporting state ' - 'to datastore'), - cfg.IntOpt('periodic_interval', - default=60, - help='Interval, in seconds, between running periodic tasks'), - cfg.IntOpt('periodic_fuzzy_delay', - default=60, - help='Range, in seconds, to randomly delay when starting the' - ' periodic task OperationEngine to reduce stampeding.' - ' (Disable by setting to 0)'), - cfg.HostAddressOpt('osapi_karbor_listen', - default="0.0.0.0", - help='IP address on which OpenStack Karbor ' - 'API listens'), - cfg.PortOpt('osapi_karbor_listen_port', - default=8799, - help='Port on which OpenStack Karbor API listens'), - cfg.IntOpt('osapi_karbor_workers', - help='Number of workers for OpenStack Karbor API service. ' - 'The default is equal to the number of CPUs available.'), ] - -CONF = cfg.CONF -CONF.register_opts(service_opts) - - -class Service(service.Service): - """Service object for binaries running on hosts. - - A service takes a manager and enables rpc by listening to queues based - on topic. It also periodically runs tasks on the manager and reports - it state to the database services table. 
- """ - - def __init__(self, host, binary, topic, manager, report_interval=None, - periodic_interval=None, periodic_fuzzy_delay=None, - service_name=None, *args, **kwargs): - super(Service, self).__init__() - - rpc.init(CONF) - - self.host = host - self.binary = binary - self.topic = topic - self.manager_class_name = manager - manager_class = importutils.import_class(self.manager_class_name) - self.manager = manager_class(host=self.host, - service_name=service_name, - *args, **kwargs) - self.report_interval = report_interval - self.periodic_interval = periodic_interval - self.periodic_fuzzy_delay = periodic_fuzzy_delay - self.basic_config_check() - self.saved_args, self.saved_kwargs = args, kwargs - self.timers = [] - - self.rpcserver = None - - def start(self): - version_string = version.version_string() - LOG.info('Starting %(topic)s node (version %(version_string)s)', - {'topic': self.topic, 'version_string': version_string}) - self.model_disconnected = False - ctxt = context.get_admin_context() - try: - service_ref = db.service_get_by_args(ctxt, - self.host, - self.binary) - self.service_id = service_ref['id'] - except exception.NotFound: - self._create_service_ref(ctxt) - - self.manager.init_host(service_id=self.service_id) - - LOG.debug("Creating RPC server for service %s", self.topic) - - target = messaging.Target(topic=self.topic, server=self.host) - endpoints = [self.manager] - endpoints.extend(self.manager.additional_endpoints) - serializer = objects_base.KarborObjectSerializer() - self.rpcserver = rpc.get_server(target, endpoints, serializer) - self.rpcserver.start() - - self.manager.init_host_with_rpc() - - if self.report_interval: - pulse = loopingcall.FixedIntervalLoopingCall( - self.report_state) - pulse.start(interval=self.report_interval, - initial_delay=self.report_interval) - self.timers.append(pulse) - - if self.periodic_interval: - if self.periodic_fuzzy_delay: - initial_delay = random.randint(0, self.periodic_fuzzy_delay) - else: - initial_delay = None - - periodic = loopingcall.FixedIntervalLoopingCall( - self.periodic_tasks) - periodic.start(interval=self.periodic_interval, - initial_delay=initial_delay) - self.timers.append(periodic) - - def basic_config_check(self): - """Perform basic config checks before starting service.""" - # Make sure report interval is less than service down time - if self.report_interval: - if CONF.service_down_time <= self.report_interval: - new_down_time = int(self.report_interval * 2.5) - LOG.warning( - "Report interval must be less than service down " - "time. Current config service_down_time: " - "%(service_down_time)s, report_interval for this: " - "service is: %(report_interval)s. Setting global " - "service_down_time to: %(new_down_time)s", - {'service_down_time': CONF.service_down_time, - 'report_interval': self.report_interval, - 'new_down_time': new_down_time}) - CONF.set_override('service_down_time', new_down_time) - - def _create_service_ref(self, context): - service_ref = db.service_create(context, - {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0}) - self.service_id = service_ref['id'] - - def __getattr__(self, key): - manager = self.__dict__.get('manager', None) - return getattr(manager, key) - - @classmethod - def create(cls, host=None, binary=None, topic=None, manager=None, - report_interval=None, periodic_interval=None, - periodic_fuzzy_delay=None, service_name=None): - """Instantiates class and passes back application object. 
- - :param host: defaults to CONF.host - :param binary: defaults to basename of executable - :param topic: defaults to bin_name - 'karbor-' part - :param manager: defaults to CONF._manager - :param report_interval: defaults to CONF.report_interval - :param periodic_interval: defaults to CONF.periodic_interval - :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay - - """ - if not host: - host = CONF.host - if not binary: - binary = os.path.basename(inspect.stack()[-1][1]) - if not topic: - topic = binary - if not manager: - subtopic = topic.rpartition('karbor-')[2] - manager = CONF.get('%s_manager' % subtopic, None) - if report_interval is None: - report_interval = CONF.report_interval - if periodic_interval is None: - periodic_interval = CONF.periodic_interval - if periodic_fuzzy_delay is None: - periodic_fuzzy_delay = CONF.periodic_fuzzy_delay - service_obj = cls(host, binary, topic, manager, - report_interval=report_interval, - periodic_interval=periodic_interval, - periodic_fuzzy_delay=periodic_fuzzy_delay, - service_name=service_name) - - return service_obj - - def kill(self): - """Destroy the service object in the datastore.""" - self.stop() - try: - db.service_destroy(context.get_admin_context(), self.service_id) - except exception.NotFound: - LOG.warning('Service killed that has no database entry') - - def stop(self): - # Try to shut the connection down, but if we get any sort of - # errors, go ahead and ignore them.. as we're shutting down anyway - try: - self.rpcserver.stop() - except Exception: - pass - for x in self.timers: - try: - x.stop() - except Exception: - pass - self.timers = [] - super(Service, self).stop() - - def wait(self): - for x in self.timers: - try: - x.wait() - except Exception: - pass - if self.rpcserver: - self.rpcserver.wait() - - def periodic_tasks(self, raise_on_error=False): - """Tasks to be run at a periodic interval.""" - ctxt = context.get_admin_context() - self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) - - def report_state(self): - """Update the state of this service in the datastore.""" - if not self.manager.is_working(): - # NOTE(dulek): If manager reports a problem we're not sending - # heartbeats - to indicate that service is actually down. - LOG.error('Manager for service %(binary)s %(host)s is ' - 'reporting problems, not sending heartbeat. ' - 'Service will appear "down".', - {'binary': self.binary, - 'host': self.host}) - return - - ctxt = context.get_admin_context() - state_catalog = {} - try: - try: - service_ref = db.service_get(ctxt, self.service_id) - except exception.NotFound: - LOG.debug('The service database object disappeared, ' - 'recreating it.') - self._create_service_ref(ctxt) - service_ref = db.service_get(ctxt, self.service_id) - - state_catalog['report_count'] = service_ref['report_count'] + 1 - - db.service_update(ctxt, - self.service_id, state_catalog) - - # TODO(termie): make this pattern be more elegant. - if getattr(self, 'model_disconnected', False): - self.model_disconnected = False - LOG.error('Recovered model server connection!') - - except db_exc.DBConnectionError: - if not getattr(self, 'model_disconnected', False): - self.model_disconnected = True - LOG.exception('model server went away') - - # NOTE(jsbryant) Other DB errors can happen in HA configurations. - # such errors shouldn't kill this thread, so we handle them here. 
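# Both handlers below follow the same pattern: flag the model as
# disconnected once, log, and let the next report_state() iteration retry.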
- except db_exc.DBError: - if not getattr(self, 'model_disconnected', False): - self.model_disconnected = True - LOG.exception('DBError encountered: ') - - except Exception: - if not getattr(self, 'model_disconnected', False): - self.model_disconnected = True - LOG.exception('Exception encountered: ') - - -class WSGIService(service.ServiceBase): - """Provides ability to launch API from a 'paste' configuration.""" - - def __init__(self, name, loader=None): - """Initialize, but do not start the WSGI server. - - :param name: The name of the WSGI server given to the loader. - :param loader: Loads the WSGI application using the given name. - :returns: None - - """ - self.name = name - self.manager = self._get_manager() - self.loader = loader or wsgi.Loader(CONF) - self.app = self.loader.load_app(name) - self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") - self.port = getattr(CONF, '%s_listen_port' % name, 0) - self.workers = (getattr(CONF, '%s_workers' % name, None) or - processutils.get_worker_count()) - if self.workers and self.workers < 1: - worker_name = '%s_workers' % name - msg = (_("%(worker_name)s value of %(workers)d is invalid, " - "must be greater than 0.") % - {'worker_name': worker_name, - 'workers': self.workers}) - raise exception.InvalidInput(msg) - - self.server = wsgi.Server(CONF, - name, - self.app, - host=self.host, - port=self.port) - super(WSGIService, self).__init__() - - def _get_manager(self): - """Initialize a Manager object appropriate for this service. - - Use the service name to look up a Manager subclass from the - configuration and initialize an instance. If no class name - is configured, just return None. - - :returns: a Manager instance, or None. - - """ - fl = '%s_manager' % self.name - if fl not in CONF: - return None - - manager_class_name = CONF.get(fl, None) - if not manager_class_name: - return None - - manager_class = importutils.import_class(manager_class_name) - return manager_class() - - def start(self): - """Start serving this service using loaded configuration. - - Also, retrieve updated port number in case '0' was passed in, which - indicates a random port should be used. - - :returns: None - - """ - if self.manager: - self.manager.init_host() - self.server.start() - self.port = self.server.port - - def stop(self): - """Stop serving this API. - - :returns: None - - """ - self.server.stop() - - def wait(self): - """Wait for the service to stop serving this API. - - :returns: None - - """ - self.server.wait() - - def reset(self): - """Reset server greenpool size to default. 
- - :returns: None - - """ - self.server.reset() - - -def process_launcher(): - return service.ProcessLauncher(CONF) - - -# NOTE(vish): the global launcher is to maintain the existing -# functionality of calling service.serve + -# service.wait -_launcher = None - - -def serve(server, workers=None): - global _launcher - if _launcher: - raise RuntimeError(_('serve() can only be called once')) - - _launcher = service.launch(CONF, server, workers=workers) - - -def wait(): - LOG.debug('Full set of CONF:') - for flag in CONF: - flag_get = CONF.get(flag, None) - # hide flag contents from log if contains a password - # should use secret flag when switch over to openstack-common - if ("_password" in flag or "_key" in flag or - (flag == "transport_url" and "rabbit:" in flag_get) or - (flag == "sql_connection" and - ("mysql:" in flag_get or "postgresql:" in flag_get))): - LOG.debug('%s : FLAG SET ', flag) - else: - LOG.debug('%(flag)s : %(flag_get)s', - {'flag': flag, 'flag_get': flag_get}) - try: - _launcher.wait() - except KeyboardInterrupt: - _launcher.stop() - rpc.cleanup() - - -class Launcher(object): - def __init__(self): - super(Launcher, self).__init__() - self.launch_service = serve - self.wait = wait - - -def get_launcher(): - # Note(lpetrut): ProcessLauncher uses green pipes which fail on Windows - # due to missing support of non-blocking I/O pipes. For this reason, the - # service must be spawned differently on Windows, using the ServiceLauncher - # class instead. - if os.name == 'nt': - return Launcher() - else: - return process_launcher() diff --git a/karbor/services/__init__.py b/karbor/services/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/operationengine/__init__.py b/karbor/services/operationengine/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/operationengine/api.py b/karbor/services/operationengine/api.py deleted file mode 100644 index bda64bf1..00000000 --- a/karbor/services/operationengine/api.py +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
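The launcher helpers above are what the karbor-* entry points build on; a minimal sketch of that bootstrap (the binary name is an assumption, and config/logging setup is omitted):

    from karbor import service

    # Create the RPC service from config-derived defaults, launch it,
    # and block until it stops; serve() may only be called once.
    server = service.Service.create(binary='karbor-operationengine')
    service.serve(server)
    service.wait()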
- -"""Handles all requests relating to OperationEngine.""" - - -from karbor.db import base -from karbor.services.operationengine import rpcapi as oe_rpcapi - - -class API(base.Base): - """API for interacting with the OperationEngine manager.""" - - def __init__(self, db_driver=None): - self.operationengine_rpcapi = oe_rpcapi.OperationEngineAPI() - super(API, self).__init__(db_driver) - - def create_scheduled_operation(self, context, operation): - self.operationengine_rpcapi.create_scheduled_operation( - context, operation) - - def delete_scheduled_operation(self, context, operation_id, trigger_id): - self.operationengine_rpcapi.delete_scheduled_operation( - context, operation_id, trigger_id) - - def suspend_scheduled_operation(self, context, operation_id, trigger_id): - self.operationengine_rpcapi.suspend_scheduled_operation( - context, operation_id, trigger_id) - - def resume_scheduled_operation(self, context, operation_id, trigger_id): - self.operationengine_rpcapi.resume_scheduled_operation( - context, operation_id, trigger_id) - - def verify_trigger(self, context, trigger): - self.operationengine_rpcapi.verify_trigger(context, trigger) - - def create_trigger(self, context, trigger): - self.operationengine_rpcapi.create_trigger(context, trigger) - - def delete_trigger(self, context, trigger_id): - self.operationengine_rpcapi.delete_trigger(context, trigger_id) - - def update_trigger(self, context, trigger): - self.operationengine_rpcapi.update_trigger(context, trigger) diff --git a/karbor/services/operationengine/engine/__init__.py b/karbor/services/operationengine/engine/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/operationengine/engine/executors/__init__.py b/karbor/services/operationengine/engine/executors/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/operationengine/engine/executors/base.py b/karbor/services/operationengine/engine/executors/base.py deleted file mode 100644 index e91271be..00000000 --- a/karbor/services/operationengine/engine/executors/base.py +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Executor which receives operations and run them. -""" - -from abc import ABCMeta -from abc import abstractmethod -import six - - -@six.add_metaclass(ABCMeta) -class BaseExecutor(object): - def __init__(self, operation_manager): - self._operation_manager = operation_manager - super(BaseExecutor, self).__init__() - - @abstractmethod - def execute_operation(self, operation_id, triggered_time, - expect_start_time, window_time, **kwargs): - """Execute an operation. - - :param operation_id: ID of operation - :param triggered_time: time when the operation is triggered - :param expect_start_time: expect time when to run the operation - :param window_time: time how long to wait to run the operation after - expect_start_time - """ - pass - - @abstractmethod - def cancel_operation(self, operation_id): - """Cancel the execution of operation. 
- - There is no effective for the operations which are running, but - for operations which are in waiting, they will not be executed. - - :param operation_id: ID of operation - """ - pass - - @abstractmethod - def resume_operation(self, operation_id, **kwargs): - """Resume operations. - - Get operations which are not finished from DB by operation_id, - and execute them again. - - :param operation_id: ID of operation - """ - pass - - @abstractmethod - def shutdown(self): - """Shutdown the executor""" - pass diff --git a/karbor/services/operationengine/engine/executors/green_thread_executor.py b/karbor/services/operationengine/engine/executors/green_thread_executor.py deleted file mode 100644 index ee694ccb..00000000 --- a/karbor/services/operationengine/engine/executors/green_thread_executor.py +++ /dev/null @@ -1,188 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet -import greenlet - -from datetime import datetime -from datetime import timedelta -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils - -from karbor.common import constants -from karbor import context -from karbor import objects -from karbor.services.operationengine.engine.executors import base - - -green_thread_executor_opts = [ - cfg.IntOpt('max_concurrent_operations', - default=0, - help='number of maximum concurrent running operations,' - '0 means no hard limit' - ) -] - -CONF = cfg.CONF -CONF.register_opts(green_thread_executor_opts, 'operationengine') - -LOG = logging.getLogger(__name__) - - -class GreenThreadExecutor(base.BaseExecutor): - - def __init__(self, operation_manager): - super(GreenThreadExecutor, self).__init__(operation_manager) - self._operation_thread_map = {} - - def execute_operation(self, operation_id, triggered_time, - expect_start_time, window_time, **kwargs): - - if operation_id in self._operation_thread_map: - LOG.warning("Execute operation(%s), the previous one has not been" - " finished", operation_id) - return - - num = CONF.operationengine.max_concurrent_operations - if num and len(self._operation_thread_map) >= num: - LOG.warning("The amount of concurrent running operations " - "exceeds %d", num) - return - self._operation_thread_map[operation_id] = None - - end_time_for_run = expect_start_time + timedelta(seconds=window_time) - ret = self._update_operation_state( - operation_id, - {'state': constants.OPERATION_STATE_RUNNING, - 'end_time_for_run': end_time_for_run}) - if not ret: - self._operation_thread_map.pop(operation_id, None) - return - - if operation_id not in self._operation_thread_map: - # This function is invoked by trigger which may runs in the - # green thread. So if operation_id is not exist, it may be - # canceled by 'cancel_operation' during the call to DB in - # the codes above. 
- LOG.warning("Operation(%s) is not exist after call to DB", - operation_id) - return - - param = { - 'operation_id': operation_id, - 'triggered_time': triggered_time, - 'expect_start_time': expect_start_time, - 'window_time': window_time, - 'run_type': constants.OPERATION_RUN_TYPE_EXECUTE - } - try: - self._create_thread(self._run_operation, operation_id, param) - except Exception: - self._operation_thread_map.pop(operation_id, None) - LOG.exception("Execute operation (%s), and create green thread " - "failed", operation_id) - - def cancel_operation(self, operation_id): - gt = self._operation_thread_map.get(operation_id, None) - if gt is not None: # can not use 'if gt' instead - # If the thead has not started, it will be killed; - # else, it will run until finishes its work. - gt.cancel() - else: - self._operation_thread_map.pop(operation_id, None) - - def resume_operation(self, operation_id, **kwargs): - end_time = kwargs.get('end_time_for_run') - now = datetime.utcnow() - if not isinstance(end_time, datetime) or now > end_time: - return - - window = int(timeutils.delta_seconds(now, end_time)) - param = { - 'operation_id': operation_id, - 'triggered_time': now, - 'expect_start_time': now, - 'window_time': window, - 'run_type': constants.OPERATION_RUN_TYPE_RESUME - } - self._create_thread(self._run_operation, operation_id, param) - - def shutdown(self): - for op_id, gt in self._operation_thread_map.items(): - if gt is None: - continue - - gt.cancel() - try: - gt.wait() # wait untile the thread finishes its work - except (greenlet.GreenletExit, Exception): - pass - - self._operation_thread_map = {} - - def _run_operation(self, operation_id, param): - - try: - try: - operation = objects.ScheduledOperation.get_by_id( - context.get_admin_context(), operation_id) - except Exception: - LOG.exception("Run operation(%s), get operation failed", - operation_id) - return - - try: - param['user_id'] = operation.user_id - param['project_id'] = operation.project_id - param['trigger_id'] = operation.trigger_id - param['scheduled_operation_id'] = operation.id - - self._operation_manager.run_operation( - operation.operation_type, - operation.operation_definition, - param=param) - except Exception: - LOG.exception("Run operation(%s) failed", operation_id) - - finally: - self._update_operation_state( - operation_id, - {'state': constants.OPERATION_STATE_REGISTERED}) - - def _update_operation_state(self, operation_id, updates): - - ctxt = context.get_admin_context() - try: - state_ref = objects.ScheduledOperationState.get_by_operation_id( - ctxt, operation_id) - for item, value in updates.items(): - setattr(state_ref, item, value) - state_ref.save() - except Exception: - LOG.exception("Execute operation(%s), update state failed", - operation_id) - return False - return True - - def _on_gt_done(self, gt, *args, **kwargs): - op_id = args[0] - try: - del self._operation_thread_map[op_id] - except Exception: - LOG.warning("Unknown operation id(%s) received, " - "when the green thread exit", op_id) - - def _create_thread(self, function, operation_id, param): - gt = eventlet.spawn(function, operation_id, param) - self._operation_thread_map[operation_id] = gt - gt.link(self._on_gt_done, operation_id) diff --git a/karbor/services/operationengine/engine/executors/scheduled_operation_executor.py b/karbor/services/operationengine/engine/executors/scheduled_operation_executor.py deleted file mode 100644 index aa2d7aeb..00000000 --- a/karbor/services/operationengine/engine/executors/scheduled_operation_executor.py +++ 
/dev/null @@ -1,131 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from abc import abstractmethod -from datetime import datetime -from datetime import timedelta -from oslo_log import log as logging -from oslo_utils import timeutils - -from karbor.common import constants -from karbor import context -from karbor import objects -from karbor.services.operationengine.engine.executors import base - - -LOG = logging.getLogger(__name__) - - -class ScheduledOperationExecutor(base.BaseExecutor): - - _CHECK_ITEMS = { - 'is_waiting': 'is_waiting', - 'is_canceled': 'is_canceled' - } - - def execute_operation(self, operation_id, triggered_time, - expect_start_time, window_time, **kwargs): - - if self._check_operation(operation_id, self._CHECK_ITEMS.values()): - LOG.warning("Execute operation(%s), it can't be executed", - operation_id) - return - - end_time_for_run = expect_start_time + timedelta(seconds=window_time) - ret = self._update_operation_state( - operation_id, - {'state': constants.OPERATION_STATE_RUNNING, - 'end_time_for_run': end_time_for_run}) - if not ret: - return - - param = { - 'operation_id': operation_id, - 'triggered_time': triggered_time, - 'expect_start_time': expect_start_time, - 'window_time': window_time, - 'run_type': constants.OPERATION_RUN_TYPE_EXECUTE - } - self._execute_operation(operation_id, self._run_operation, param) - - def resume_operation(self, operation_id, **kwargs): - end_time = kwargs.get('end_time_for_run') - now = datetime.utcnow() - if not isinstance(end_time, datetime) or now > end_time: - return - - window = int(timeutils.delta_seconds(now, end_time)) - param = { - 'operation_id': operation_id, - 'triggered_time': now, - 'expect_start_time': now, - 'window_time': window, - 'run_type': constants.OPERATION_RUN_TYPE_RESUME - } - self._execute_operation(operation_id, self._run_operation, param) - - def _run_operation(self, operation_id, param): - - try: - check_item = [self._CHECK_ITEMS['is_canceled']] - if self._check_operation(operation_id, check_item): - return - - try: - operation = objects.ScheduledOperation.get_by_id( - context.get_admin_context(), operation_id) - except Exception: - LOG.exception("Run operation(%s), get operation failed", - operation_id) - return - - try: - param['user_id'] = operation.user_id - param['project_id'] = operation.project_id - param['trigger_id'] = operation.trigger_id - param['scheduled_operation_id'] = operation.id - - self._operation_manager.run_operation( - operation.operation_type, - operation.operation_definition, - param=param) - except Exception: - LOG.exception("Run operation(%s) failed", operation_id) - - finally: - self._update_operation_state( - operation_id, - {'state': constants.OPERATION_STATE_REGISTERED}) - - def _update_operation_state(self, operation_id, updates): - - ctxt = context.get_admin_context() - try: - state_ref = objects.ScheduledOperationState.get_by_operation_id( - ctxt, operation_id) - for item, value in updates.items(): - setattr(state_ref, item, value) - state_ref.save() - except 
Exception:
-            LOG.exception("Execute operation(%s), update state failed",
-                          operation_id)
-            return False
-        return True
-
-    @abstractmethod
-    def _execute_operation(self, operation_id, function, param):
-        pass
-
-    @abstractmethod
-    def _check_operation(self, operation_id, check_items):
-        """Check whether any of the items in check_items applies"""
-        pass
diff --git a/karbor/services/operationengine/engine/executors/thread_pool_executor.py b/karbor/services/operationengine/engine/executors/thread_pool_executor.py
deleted file mode 100644
index f308bb70..00000000
--- a/karbor/services/operationengine/engine/executors/thread_pool_executor.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from collections import defaultdict
-import futurist
-from oslo_config import cfg
-from oslo_log import log as logging
-from threading import RLock
-
-from karbor.services.operationengine.engine.executors import \
-    scheduled_operation_executor as base_executor
-
-executor_opts = [
-    cfg.IntOpt('thread_count',
               default=10,
-               help='The number of threads the executor will start')
-]
-
-CONF = cfg.CONF
-CONF.register_opts(executor_opts)
-
-LOG = logging.getLogger(__name__)
-
-
-class ThreadPoolExecutor(base_executor.ScheduledOperationExecutor):
-
-    def __init__(self, operation_manager, thread_count=None):
-        super(ThreadPoolExecutor, self).__init__(operation_manager)
-
-        if thread_count is None:
-            thread_count = CONF.thread_count
-
-        self._pool = futurist.GreenThreadPoolExecutor(thread_count)
-        self._operation_to_run = defaultdict(int)
-        self._operation_to_cancel = set()
-        self._lock = RLock()
-
-        self._check_functions = {
-            self._CHECK_ITEMS['is_waiting']: lambda op_id: (
-                op_id in self._operation_to_run),
-
-            self._CHECK_ITEMS['is_canceled']: lambda op_id: (
-                op_id in self._operation_to_cancel),
-        }
-
-    def shutdown(self, wait=True):
-        self._pool.shutdown(wait)
-        self._operation_to_run.clear()
-        self._operation_to_cancel.clear()
-
-    def cancel_operation(self, operation_id):
-        with self._lock:
-            if operation_id in self._operation_to_run:
-                self._operation_to_cancel.add(operation_id)
-
-    def _check_operation(self, operation_id, check_items):
-        with self._lock:
-            return any(self._check_functions[item](operation_id)
-                       for item in check_items)
-
-    def _execute_operation(self, operation_id, function, param):
-
-        def callback(f):
-            self._finish_operation(operation_id)
-
-        with self._lock:
-            self._operation_to_run[operation_id] += 1
-
-            try:
-                f = self._pool.submit(function, operation_id, param)
-                f.add_done_callback(callback)
-
-            except Exception:
-                self._operation_to_run[operation_id] -= 1
-                LOG.exception("Submit operation(%s) failed.", operation_id)
-
-    def _finish_operation(self, operation_id):
-        with self._lock:
-            self._operation_to_run[operation_id] -= 1
-            if 0 == self._operation_to_run[operation_id]:
-                del self._operation_to_run[operation_id]
-
-            if operation_id in self._operation_to_cancel:
-                self._operation_to_cancel.remove(operation_id)
diff --git 
a/karbor/services/operationengine/engine/triggers/__init__.py b/karbor/services/operationengine/engine/triggers/__init__.py deleted file mode 100644 index a2db08c8..00000000 --- a/karbor/services/operationengine/engine/triggers/__init__.py +++ /dev/null @@ -1,77 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from abc import ABCMeta -from abc import abstractmethod -import six - -from karbor import loadables - - -@six.add_metaclass(ABCMeta) -class BaseTrigger(object): - """Trigger base class that all Triggers should inherit from""" - - TRIGGER_TYPE = "" - IS_ENABLED = True - - def __init__(self, trigger_id, trigger_property, executor): - super(BaseTrigger, self).__init__() - - self._id = trigger_id - self._operation_ids = set() - self._executor = executor - - @abstractmethod - def shutdown(self): - pass - - @abstractmethod - def register_operation(self, operation_id, **kwargs): - pass - - @abstractmethod - def unregister_operation(self, operation_id, **kwargs): - pass - - @abstractmethod - def update_trigger_property(self, trigger_property): - pass - - @classmethod - @abstractmethod - def check_trigger_definition(cls, trigger_definition): - pass - - @classmethod - @abstractmethod - def check_configuration(cls): - pass - - def has_operations(self): - return (len(self._operation_ids) != 0) - - -class TriggerHandler(loadables.BaseLoader): - - def __init__(self): - super(TriggerHandler, self).__init__(BaseTrigger) - - -def all_triggers(): - """Get all trigger classes.""" - all_classes = TriggerHandler().get_all_classes() - for trigger_class in all_classes[:]: - if trigger_class.TRIGGER_TYPE == 'time' and ( - not trigger_class.IS_ENABLED): - all_classes.remove(trigger_class) - return all_classes diff --git a/karbor/services/operationengine/engine/triggers/timetrigger/__init__.py b/karbor/services/operationengine/engine/triggers/timetrigger/__init__.py deleted file mode 100644 index fa1f015e..00000000 --- a/karbor/services/operationengine/engine/triggers/timetrigger/__init__.py +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -time_trigger_opts = [ - cfg.IntOpt('min_interval', - default=60 * 60, - help='The minimum interval of two adjacent time points. 
' - 'min_interval >= (max_window_time * 2)'), - - cfg.IntOpt('min_window_time', - default=900, - help='The minimum window time'), - - cfg.IntOpt('max_window_time', - default=1800, - help='The maximum window time'), - - cfg.StrOpt('time_format', - default='calendar', - choices=['crontab', 'calendar'], - help='The type of time format which is used to compute time'), - - cfg.IntOpt('trigger_poll_interval', - default=15, - help='Interval, in seconds, in which Karbor will poll for ' - 'trigger events'), - - cfg.StrOpt('scheduling_strategy', - default='multi_node', - help='Time trigger scheduling strategy ' - ) -] - -CONF = cfg.CONF -CONF.register_opts(time_trigger_opts) diff --git a/karbor/services/operationengine/engine/triggers/timetrigger/time_trigger.py b/karbor/services/operationengine/engine/triggers/timetrigger/time_trigger.py deleted file mode 100644 index e9e96c2a..00000000 --- a/karbor/services/operationengine/engine/triggers/timetrigger/time_trigger.py +++ /dev/null @@ -1,263 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from datetime import datetime -from datetime import timedelta -import eventlet -import functools - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils - -from karbor import exception -from karbor.i18n import _ -from karbor.services.operationengine.engine import triggers -from karbor.services.operationengine.engine.triggers.timetrigger import utils - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class TriggerOperationGreenThread(object): - def __init__(self, first_run_time, function): - super(TriggerOperationGreenThread, self).__init__() - self._is_sleeping = True - self._pre_run_time = None - self._running = False - self._thread = None - - self._function = function - - self._start(first_run_time) - - def kill(self): - self._running = False - if self._is_sleeping: - self._thread.kill() - - @property - def running(self): - return self._running - - @property - def pre_run_time(self): - return self._pre_run_time - - def _start(self, first_run_time): - self._running = True - - now = datetime.utcnow() - initial_delay = 0 if first_run_time <= now else ( - int(timeutils.delta_seconds(now, first_run_time))) - - self._thread = eventlet.spawn_after( - initial_delay, self._run, first_run_time) - self._thread.link(self._on_done) - - def _on_done(self, gt, *args, **kwargs): - self._is_sleeping = True - self._pre_run_time = None - self._running = False - self._thread = None - - def _run(self, expect_run_time): - while self._running: - self._is_sleeping = False - self._pre_run_time = expect_run_time - - expect_run_time = self._function(expect_run_time) - if expect_run_time is None or not self._running: - break - - self._is_sleeping = True - - now = datetime.utcnow() - idle_time = 0 if expect_run_time <= now else int( - timeutils.delta_seconds(now, expect_run_time)) - eventlet.sleep(idle_time) - - -class TimeTrigger(triggers.BaseTrigger): - TRIGGER_TYPE = "time" - IS_ENABLED = (CONF.scheduling_strategy == 'default') - - 
def __init__(self, trigger_id, trigger_property, executor): - super(TimeTrigger, self).__init__( - trigger_id, trigger_property, executor) - - self._trigger_property = self.check_trigger_definition( - trigger_property) - - self._greenthread = None - - def shutdown(self): - self._kill_greenthread() - - def register_operation(self, operation_id, **kwargs): - if operation_id in self._operation_ids: - msg = (_("The operation_id(%s) is exist") % operation_id) - raise exception.ScheduledOperationExist(msg) - - if self._greenthread and not self._greenthread.running: - raise exception.TriggerIsInvalid(trigger_id=self._id) - - self._operation_ids.add(operation_id) - if self._greenthread is None: - self._start_greenthread() - - def unregister_operation(self, operation_id, **kwargs): - if operation_id not in self._operation_ids: - return - - self._operation_ids.remove(operation_id) - if 0 == len(self._operation_ids): - self._kill_greenthread() - - def update_trigger_property(self, trigger_property): - valid_trigger_property = self.check_trigger_definition( - trigger_property) - - if valid_trigger_property == self._trigger_property: - return - - timer = self._get_timer(valid_trigger_property) - first_run_time = self._compute_next_run_time( - datetime.utcnow(), trigger_property['end_time'], timer) - if not first_run_time: - msg = (_("The new trigger property is invalid, " - "Can not find the first run time")) - raise exception.InvalidInput(msg) - - if self._greenthread is not None: - pre_run_time = self._greenthread.pre_run_time - if pre_run_time: - end_time = pre_run_time + timedelta( - seconds=self._trigger_property['window']) - if first_run_time <= end_time: - msg = (_("The new trigger property is invalid, " - "First run time%(t1)s must be after %(t2)s") % - {'t1': first_run_time, 't2': end_time}) - raise exception.InvalidInput(msg) - - self._trigger_property = valid_trigger_property - - if len(self._operation_ids) > 0: - # Restart greenthread to take the change of trigger property - # effect immediately - self._kill_greenthread() - self._create_green_thread(first_run_time, timer) - - def _kill_greenthread(self): - if self._greenthread: - self._greenthread.kill() - self._greenthread = None - - def _start_greenthread(self): - # Find the first time. - # We don't known when using this trigger first time. - timer = self._get_timer(self._trigger_property) - first_run_time = self._compute_next_run_time( - datetime.utcnow(), self._trigger_property['end_time'], timer) - if not first_run_time: - raise exception.TriggerIsInvalid(trigger_id=self._id) - - self._create_green_thread(first_run_time, timer) - - def _create_green_thread(self, first_run_time, timer): - func = functools.partial( - self._trigger_operations, - trigger_property=self._trigger_property.copy(), - timer=timer) - - self._greenthread = TriggerOperationGreenThread( - first_run_time, func) - - def _trigger_operations(self, expect_run_time, trigger_property, timer): - """Trigger operations once - - returns: wait time for next run - """ - - # Just for robustness, actually expect_run_time always <= now - # but, if the scheduling of eventlet is not accurate, then we - # can do some adjustments. - entry_time = datetime.utcnow() - if entry_time < expect_run_time and ( - int(timeutils.delta_seconds(entry_time, expect_run_time)) > 0): - return expect_run_time - - # The self._executor.execute_operation may have I/O operation. - # If it is, this green thread will be switched out during looping - # operation_ids. 
In order to avoid changing self._operation_ids - # during the green thread is switched out, copy self._operation_ids - # as the iterative object. - operation_ids = self._operation_ids.copy() - sent_ops = set() - window = trigger_property.get("window") - end_time = expect_run_time + timedelta(seconds=window) - - for operation_id in operation_ids: - if operation_id not in self._operation_ids: - # Maybe, when traversing this operation_id, it has been - # removed by self.unregister_operation - LOG.warning("Execute operation %s which is not exist, " - "ignore it", operation_id) - continue - - now = datetime.utcnow() - if now >= end_time: - LOG.error("Can not trigger operations to run. Because it is " - "out of window time. now=%(now)s, " - "end time=%(end_time)s, expect run time=%(expect)s," - " wating operations=%(ops)s", - {'now': now, 'end_time': end_time, - 'expect': expect_run_time, - 'ops': operation_ids - sent_ops}) - break - - try: - self._executor.execute_operation( - operation_id, now, expect_run_time, window) - except Exception: - LOG.exception("Submit operation to executor failed, operation" - " id=%s", operation_id) - - sent_ops.add(operation_id) - - next_time = self._compute_next_run_time( - expect_run_time, trigger_property['end_time'], timer) - now = datetime.utcnow() - if next_time and next_time <= now: - LOG.error("Next run time:%(next_time)s <= now:%(now)s. Maybe the " - "entry time=%(entry)s is too late, even exceeds the end" - " time of window=%(end)s, or it was blocked where " - "sending the operation to executor.", - {'next_time': next_time, 'now': now, - 'entry': entry_time, 'end': end_time}) - return next_time - - @classmethod - def check_trigger_definition(cls, trigger_definition): - return utils.check_trigger_definition(trigger_definition) - - @classmethod - def _compute_next_run_time(cls, start_time, end_time, timer): - return utils.compute_next_run_time(start_time, end_time, timer) - - @classmethod - def _get_timer(cls, trigger_property): - return utils.get_timer(trigger_property) - - @classmethod - def check_configuration(cls): - utils.check_configuration() diff --git a/karbor/services/operationengine/engine/triggers/timetrigger/time_trigger_multi_node.py b/karbor/services/operationengine/engine/triggers/timetrigger/time_trigger_multi_node.py deleted file mode 100644 index 5ebf54e6..00000000 --- a/karbor/services/operationengine/engine/triggers/timetrigger/time_trigger_multi_node.py +++ /dev/null @@ -1,253 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
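The window check that both trigger implementations above perform reduces to a small predicate: an operation may still be handed to the executor only while the current time has not passed expect_run_time plus the window. A minimal stand-alone sketch of that rule (names here are illustrative, not from the Karbor tree):

from datetime import datetime, timedelta


def inside_run_window(expect_run_time, window_seconds, now=None):
    # An operation is only dispatched while "now" has not yet passed
    # expect_run_time + window; afterwards the run is dropped.
    now = now or datetime.utcnow()
    end_time = expect_run_time + timedelta(seconds=window_seconds)
    return now < end_time


t0 = datetime.utcnow() - timedelta(seconds=10)
print(inside_run_window(t0, 900))  # True: 10s into a 900s window
print(inside_run_window(t0, 5))    # False: the 5s window already closed
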
- -from datetime import datetime -from datetime import timedelta - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall - -from karbor import context as karbor_context -from karbor import db -from karbor import exception -from karbor.i18n import _ -from karbor.services.operationengine.engine import triggers -from karbor.services.operationengine.engine.triggers.timetrigger import utils - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class TimeTrigger(triggers.BaseTrigger): - - TRIGGER_TYPE = "time" - IS_ENABLED = (CONF.scheduling_strategy == 'multi_node') - - _loopingcall = None - _triggers = {} - - def __init__(self, trigger_id, trigger_property, executor): - super(TimeTrigger, self).__init__( - trigger_id, trigger_property, executor) - - self._trigger_property = self.check_trigger_definition( - trigger_property) - - timer = self._get_timer(self._trigger_property) - first_run_time = self._compute_next_run_time( - datetime.utcnow(), self._trigger_property['end_time'], timer) - LOG.debug("first_run_time: %s", first_run_time) - - self._trigger_execution_new(self._id, first_run_time) - - if not self.__class__._loopingcall: - self.__class__._loopingcall = loopingcall.FixedIntervalLoopingCall( - self._loop) - self.__class__._loopingcall.start( - interval=CONF.trigger_poll_interval, - stop_on_exception=False, - ) - - self._register() - - def _register(self): - self.__class__._triggers[self._id] = self - - def _unregister(self): - del self.__class__._triggers[self._id] - - @classmethod - def _loop(cls): - while True: - now = datetime.utcnow() - exec_to_handle = cls._trigger_execution_get_next() - if not exec_to_handle: - LOG.debug("No next trigger executions") - break - - trigger_id = exec_to_handle.trigger_id - execution_time = exec_to_handle.execution_time - trigger = cls._triggers.get(trigger_id) - if not trigger: - LOG.warning("Unable to find trigger %s", trigger_id) - res = cls._trigger_execution_delete( - execution_id=exec_to_handle.id) - continue - - if now < execution_time: - LOG.debug("Time trigger not yet due") - break - - trigger_property = trigger._trigger_property - timer = cls._get_timer(trigger_property) - window = trigger_property.get("window") - end_time_to_run = execution_time + timedelta( - seconds=window) - - if now > end_time_to_run: - LOG.debug("Time trigger (%s) out of window",) - execute = False - else: - LOG.debug("Time trigger (%s) is due", trigger_id) - execute = True - - next_exec_time = cls._compute_next_run_time( - now, - trigger_property['end_time'], - timer, - ) - if not next_exec_time: - LOG.debug("No more planned executions for trigger (%s)", - trigger_id) - res = cls._trigger_execution_delete( - execution_id=exec_to_handle.id) - else: - LOG.debug("Rescheduling (%s) from %s to %s", - trigger_id, - execution_time, - next_exec_time) - res = cls._trigger_execution_update( - exec_to_handle.id, - execution_time, - next_exec_time, - ) - - if not res: - LOG.info("Trigger probably handled by another node") - continue - - if execute: - cls._trigger_operations(trigger_id, execution_time, window) - - @classmethod - def _trigger_execution_new(cls, trigger_id, time): - # Find the first time. - # We don't known when using this trigger first time. 
- ctxt = karbor_context.get_admin_context() - try: - db.trigger_execution_create(ctxt, trigger_id, time) - return True - except Exception: - return False - - @classmethod - def _trigger_execution_update(cls, id, current_time, next_time): - ctxt = karbor_context.get_admin_context() - return db.trigger_execution_update(ctxt, id, current_time, next_time) - - @classmethod - def _trigger_execution_delete(cls, execution_id=None, trigger_id=None): - if execution_id is None and trigger_id is None: - raise exception.InvalidParameterValue('supply at least one id') - - ctxt = karbor_context.get_admin_context() - num_deleted = db.trigger_execution_delete(ctxt, execution_id, - trigger_id) - return num_deleted > 0 - - @classmethod - def _trigger_execution_get_next(cls): - ctxt = karbor_context.get_admin_context() - return db.trigger_execution_get_next(ctxt) - - def shutdown(self): - self._unregister() - - def register_operation(self, operation_id, **kwargs): - if operation_id in self._operation_ids: - msg = (_("The operation_id(%s) is exist") % operation_id) - raise exception.ScheduledOperationExist(msg) - - self._operation_ids.add(operation_id) - - def unregister_operation(self, operation_id, **kwargs): - self._operation_ids.discard(operation_id) - - def update_trigger_property(self, trigger_property): - valid_trigger_property = self.check_trigger_definition( - trigger_property) - - if valid_trigger_property == self._trigger_property: - return - - timer = self._get_timer(valid_trigger_property) - first_run_time = self._compute_next_run_time( - datetime.utcnow(), valid_trigger_property['end_time'], timer) - - if not first_run_time: - msg = (_("The new trigger property is invalid, " - "Can not find the first run time")) - raise exception.InvalidInput(msg) - - self._trigger_property = valid_trigger_property - self._trigger_execution_delete(trigger_id=self._id) - self._trigger_execution_new(self._id, first_run_time) - - @classmethod - def _trigger_operations(cls, trigger_id, expect_run_time, window): - """Trigger operations once""" - - # The executor execute_operation may have I/O operation. - # If it is, this green thread will be switched out during looping - # operation_ids. In order to avoid changing self._operation_ids - # during the green thread is switched out, copy self._operation_ids - # as the iterative object. - trigger = cls._triggers.get(trigger_id) - if not trigger: - LOG.warning("Can't find trigger: %s" % trigger_id) - return - operations_ids = trigger._operation_ids.copy() - sent_ops = set() - end_time = expect_run_time + timedelta(seconds=window) - - for operation_id in operations_ids: - if operation_id not in trigger._operation_ids: - # Maybe, when traversing this operation_id, it has been - # removed by self.unregister_operation - LOG.warning("Execute operation %s which is not exist, " - "ignore it", operation_id) - continue - - now = datetime.utcnow() - if now >= end_time: - LOG.error("Can not trigger operations to run. Because it is " - "out of window time. 
now=%(now)s, " - "end time=%(end_time)s, waiting operations=%(ops)s", - {'now': now, 'end_time': end_time, - 'ops': operations_ids - sent_ops}) - break - - try: - trigger._executor.execute_operation( - operation_id, now, expect_run_time, window) - except Exception: - LOG.exception("Submit operation to executor failed, operation" - " id=%s", operation_id) - - sent_ops.add(operation_id) - - @classmethod - def check_trigger_definition(cls, trigger_definition): - return utils.check_trigger_definition(trigger_definition) - - @classmethod - def _compute_next_run_time(cls, start_time, end_time, timer): - return utils.compute_next_run_time(start_time, end_time, timer) - - @classmethod - def _get_timer(cls, trigger_property): - return utils.get_timer(trigger_property) - - @classmethod - def check_configuration(cls): - utils.check_configuration() diff --git a/karbor/services/operationengine/engine/triggers/timetrigger/timeformats/__init__.py b/karbor/services/operationengine/engine/triggers/timetrigger/timeformats/__init__.py deleted file mode 100644 index eddb7f23..00000000 --- a/karbor/services/operationengine/engine/triggers/timetrigger/timeformats/__init__.py +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -time format base class -""" - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class TimeFormat(object): - - def __init__(self, start_time, pattern): - """Initiate time format - - :param start_time: The time points after the start_time are valid - :param pattern: The pattern of the time - - When the start_time and pattern are specified, the time points - can be calculated and are immutable. - """ - pass - - @classmethod - @abc.abstractmethod - def check_time_format(cls, pattern): - """Check time format - - :param pattern: The pattern of the time - """ - pass - - @abc.abstractmethod - def compute_next_time(self, current_time): - """Compute next time - - :param current_time: the time before the next time - """ - pass - - @abc.abstractmethod - def get_min_interval(self): - """Get minimum interval of two adjacent time points""" - pass diff --git a/karbor/services/operationengine/engine/triggers/timetrigger/timeformats/calendar_time.py b/karbor/services/operationengine/engine/triggers/timetrigger/timeformats/calendar_time.py deleted file mode 100644 index 8c01f0d7..00000000 --- a/karbor/services/operationengine/engine/triggers/timetrigger/timeformats/calendar_time.py +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
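The TimeFormat base class above is a small contract: given a start time and a pattern, compute_next_time() yields the next scheduled point and get_min_interval() reports the spacing between two adjacent points. A toy implementation (purely illustrative, not part of Karbor) shows how a consumer enumerates a schedule:

from datetime import datetime, timedelta


class FixedIntervalFormat(object):
    """Toy TimeFormat: fires every `seconds` seconds after start_time."""

    def __init__(self, start_time, seconds):
        self._start = start_time
        self._step = timedelta(seconds=seconds)

    def compute_next_time(self, current_time):
        # Return the first scheduled point strictly after current_time.
        if current_time < self._start:
            return self._start
        steps = (current_time - self._start) // self._step + 1
        return self._start + steps * self._step

    def get_min_interval(self):
        return int(self._step.total_seconds())


timer = FixedIntervalFormat(datetime(2020, 1, 1), 3600)
t = datetime(2020, 1, 1)
for _ in range(3):
    t = timer.compute_next_time(t)
    print(t)  # 01:00, then 02:00, then 03:00 on 2020-01-01
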
-import os
-
-from datetime import datetime
-from dateutil import rrule
-from icalendar import Calendar
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
-
-from karbor import exception
-from karbor.i18n import _
-from karbor.services.operationengine.engine.triggers.timetrigger import \
-    timeformats
-
-
-RATE = 2
-FREQ_TO_KWARGS = [{'days': RATE * 366},
-                  {'days': RATE * 31},
-                  {'days': RATE * 7},
-                  {'days': RATE},
-                  {'hours': RATE},
-                  {'minutes': RATE},
-                  {'seconds': RATE}]
-
-RREQ_MAP = {"YEARLY": 0,
-            "MONTHLY": 1,
-            "WEEKLY": 2,
-            "DAILY": 3,
-            "HOURLY": 4,
-            "MINUTELY": 5,
-            "SECONDLY": 6}
-
-
-class ICal(timeformats.TimeFormat):
-    """icalendar time format."""
-
-    def __init__(self, start_time, pattern):
-        super(ICal, self).__init__(start_time, pattern)
-        cal = Calendar.from_ical(self._decode_calendar_pattern(pattern))
-        vevent = cal.walk('VEVENT')[0]
-        self.dtstart = start_time
-        self.min_freq = self._get_min_freq(vevent)
-        self.rrule_obj = self._get_rrule_obj(vevent, start_time)
-
-    @staticmethod
-    def _decode_calendar_pattern(pattern):
-        try:
-            pattern.index('\\')
-            pattern_dict = jsonutils.loads('{"pattern": "%s"}' % pattern)
-            return pattern_dict["pattern"]
-        except Exception:
-            return pattern
-
-    @staticmethod
-    def _get_rrule_obj(vevent, dtstart):
-        rrules = vevent.get('RRULE')
-        rrule_list = rrules if isinstance(rrules, list) else [rrules]
-        rrule_str = os.linesep.join(recur.to_ical().decode("utf-8")
-                                    for recur in rrule_list)
-        return rrule.rrulestr(rrule_str, dtstart=dtstart, cache=False)
-
-    @staticmethod
-    def _get_min_freq(vevent):
-        recur = vevent.decoded("RRULE")
-        recur_list = recur if isinstance(recur, list) else [recur]
-        freq_list = []
-        for recur in recur_list:
-            for freq in recur.get("FREQ"):
-                freq_list.append(RREQ_MAP[freq.upper()])
-        return max(freq_list)
-
-    @classmethod
-    def check_time_format(cls, pattern):
-        """Check the time format
-
-        :param pattern: the pattern of the icalendar time
-        """
-        try:
-            cal_obj = Calendar.from_ical(cls._decode_calendar_pattern(pattern))
-        except Exception:
-            msg = (_("The trigger pattern(%s) is invalid") % pattern)
-            raise exception.InvalidInput(msg)
-
-        try:
-            vevent = cal_obj.walk('VEVENT')[0]
-        except Exception:
-            msg = (_("The trigger pattern(%s) must include at least one "
-                     "VEVENT component") % pattern)
-            raise exception.InvalidInput(msg)
-
-        try:
-            vevent.decoded('RRULE')
-        except Exception:
-            msg = (_("The first VEVENT component of trigger pattern(%s) must "
-                     "include at least one RRULE property") % pattern)
-            raise exception.InvalidInput(msg)
-
-    def compute_next_time(self, current_time):
-        """Compute the next time
-
-        :param current_time: the time before the next time
-        :return: datetime or None
-
-        """
-        next_time = self.rrule_obj.after(current_time)
-        return next_time if next_time else None
-
-    def get_min_interval(self):
-        """Get the minimum interval between two adjacent time points
-
-        :return: int(seconds) or None
-
-        """
-        try:
-            t1 = self.compute_next_time(datetime.now())
-            t2 = self.compute_next_time(t1)
-            return timeutils.delta_seconds(t1, t2)
-        except Exception:
-            return None
diff --git a/karbor/services/operationengine/engine/triggers/timetrigger/timeformats/crontab_time.py b/karbor/services/operationengine/engine/triggers/timetrigger/timeformats/crontab_time.py
deleted file mode 100644
index e11467c8..00000000
--- a/karbor/services/operationengine/engine/triggers/timetrigger/timeformats/crontab_time.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you
may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from croniter import croniter -from datetime import datetime -from oslo_utils import timeutils - -from karbor import exception -from karbor.i18n import _ -from karbor.services.operationengine.engine.triggers.timetrigger import \ - timeformats - - -class Crontab(timeformats.TimeFormat): - - def __init__(self, start_time, pattern): - self._start_time = start_time - self._pattern = pattern - super(Crontab, self).__init__(start_time, pattern) - - @classmethod - def check_time_format(cls, pattern): - if not pattern: - msg = (_("The trigger pattern is None")) - raise exception.InvalidInput(msg) - - try: - croniter(pattern) - except Exception: - msg = (_("The trigger pattern(%s) is invalid") % pattern) - raise exception.InvalidInput(msg) - - def compute_next_time(self, current_time): - time = current_time if current_time >= self._start_time else ( - self._start_time) - return croniter(self._pattern, time).get_next(datetime) - - def get_min_interval(self): - try: - t1 = self.compute_next_time(datetime.now()) - t2 = self.compute_next_time(t1) - return timeutils.delta_seconds(t1, t2) - except Exception: - return None diff --git a/karbor/services/operationengine/engine/triggers/timetrigger/utils.py b/karbor/services/operationengine/engine/triggers/timetrigger/utils.py deleted file mode 100644 index 55442ef9..00000000 --- a/karbor/services/operationengine/engine/triggers/timetrigger/utils.py +++ /dev/null @@ -1,132 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
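The Crontab class above delegates all parsing to the croniter library: check_time_format() simply tries to construct a croniter, and compute_next_time() asks for the next datetime after a base time. The library calls it relies on look like this (the pattern below is an arbitrary example):

from datetime import datetime

from croniter import croniter

pattern = '0 2 * * *'              # every day at 02:00
base = datetime(2020, 1, 1, 12, 0)

it = croniter(pattern, base)
print(it.get_next(datetime))       # 2020-01-02 02:00:00
print(it.get_next(datetime))       # 2020-01-03 02:00:00
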
-
-from datetime import datetime
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import timeutils
-import six
-from stevedore import driver as import_driver
-
-from karbor import exception
-from karbor.i18n import _
-
-CONF = cfg.CONF
-
-LOG = logging.getLogger(__name__)
-
-
-def get_time_format_class():
-    return import_driver.DriverManager(
-        'karbor.operationengine.engine.timetrigger.time_format',
-        CONF.time_format).driver
-
-
-def compute_next_run_time(start_time, end_time, timer):
-    next_time = timer.compute_next_time(start_time)
-    if next_time and (not end_time or next_time <= end_time):
-        return next_time
-    return None
-
-
-def check_and_get_datetime(time, time_name):
-    if not time:
-        return None
-
-    if isinstance(time, datetime):
-        return time
-
-    if not isinstance(time, six.string_types):
-        msg = (_("The trigger %(name)s (type = %(vtype)s) is "
-                 "not a string") %
-               {"name": time_name, "vtype": type(time)})
-        raise exception.InvalidInput(msg)
-
-    try:
-        time = timeutils.parse_strtime(time, fmt='%Y-%m-%d %H:%M:%S')
-    except Exception:
-        msg = (_("The format of trigger %s is incorrect") % time_name)
-        raise exception.InvalidInput(msg)
-
-    return time
-
-
-def check_trigger_definition(trigger_definition):
-    """Check the trigger definition
-
-    All the time instances in trigger_definition are in UTC,
-    including start_time and end_time.
-    """
-    tf_cls = get_time_format_class()
-
-    pattern = trigger_definition.get("pattern", None)
-    tf_cls.check_time_format(pattern)
-
-    start_time = trigger_definition.get("start_time", None)
-    if not start_time:
-        msg = _("The trigger's start time is unknown")
-        raise exception.InvalidInput(msg)
-    start_time = check_and_get_datetime(start_time, "start_time")
-
-    interval = tf_cls(start_time, pattern).get_min_interval()
-    if interval is not None and interval < CONF.min_interval:
-        msg = (_("The interval between two adjacent time points "
-                 "is less than %d") % CONF.min_interval)
-        raise exception.InvalidInput(msg)
-
-    window = trigger_definition.get("window", CONF.min_window_time)
-    if not isinstance(window, int):
-        try:
-            window = int(window)
-        except Exception:
-            msg = (_("The trigger window (%s) is not an integer") % window)
-            raise exception.InvalidInput(msg)
-
-    if window < CONF.min_window_time or window > CONF.max_window_time:
-        msg = (_("The trigger window %(window)d must be between "
-                 "%(min_window)d and %(max_window)d") %
-               {"window": window,
-                "min_window": CONF.min_window_time,
-                "max_window": CONF.max_window_time})
-        raise exception.InvalidInput(msg)
-
-    end_time = trigger_definition.get("end_time", None)
-    end_time = check_and_get_datetime(end_time, "end_time")
-
-    if end_time and end_time <= start_time:
-        msg = (_("The trigger's start time (%(start_time)s) is not "
-                 "earlier than its end time (%(end_time)s)") %
-               {'start_time': start_time, 'end_time': end_time})
-        LOG.error(msg)
-        raise exception.InvalidInput(msg)
-    valid_trigger_property = trigger_definition.copy()
-    valid_trigger_property['window'] = window
-    valid_trigger_property['start_time'] = start_time
-    valid_trigger_property['end_time'] = end_time
-    return valid_trigger_property
-
-
-def check_configuration():
-    min_window = CONF.min_window_time
-    max_window = CONF.max_window_time
-    min_interval = CONF.min_interval
-
-    if not (min_window < max_window and (max_window * 2 <= min_interval)):
-        msg = (_('The time trigger configuration is invalid'))
-        raise exception.InvalidInput(msg)
-
-
-def get_timer(trigger_property):
-    tf_cls = get_time_format_class()
-
timer = tf_cls(trigger_property['start_time'], - trigger_property['pattern']) - return timer diff --git a/karbor/services/operationengine/engine/triggers/trigger_manager.py b/karbor/services/operationengine/engine/triggers/trigger_manager.py deleted file mode 100644 index 7961b404..00000000 --- a/karbor/services/operationengine/engine/triggers/trigger_manager.py +++ /dev/null @@ -1,132 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Manage all triggers. -""" - -from karbor import exception -from karbor.i18n import _ -from karbor.services.operationengine.engine import triggers as all_triggers - - -class TriggerManager(object): - """Manage all trigger classes which are defined at triggers dir.""" - - def __init__(self, executor): - super(TriggerManager, self).__init__() - - all_cls = all_triggers.all_triggers() - self._trigger_cls_map = {cls.TRIGGER_TYPE: - cls for cls in all_cls} - - for t, cls in self._trigger_cls_map.items(): - cls.check_configuration() - - # self._trigger_obj_map = { - # trigger_id: trigger, - # } - self._trigger_obj_map = {} - - self._executor = executor - - def shutdown(self): - - for trigger_id, trigger in self._trigger_obj_map.items(): - trigger.shutdown() - - self._trigger_obj_map.clear() - self._trigger_cls_map.clear() - - if self._executor: - self._executor.shutdown() - self._executor = None - - def check_trigger_definition(self, trigger_type, trigger_definition): - """Check trigger definition - - :param trigger_type: Type of trigger - :param trigger_definition: Definition of trigger - """ - - trigger_cls = self._get_trigger_class(trigger_type) - trigger_cls.check_trigger_definition(trigger_definition) - - def add_trigger(self, trigger_id, trigger_type, trigger_property): - if trigger_id in self._trigger_obj_map: - msg = (_("Trigger id(%s) is exist") % trigger_id) - raise exception.InvalidInput(msg) - - trigger_cls = self._get_trigger_class(trigger_type) - trigger = trigger_cls(trigger_id, trigger_property, self._executor) - self._trigger_obj_map[trigger_id] = trigger - - def remove_trigger(self, trigger_id): - trigger = self._trigger_obj_map.get(trigger_id, None) - if not trigger: - raise exception.TriggerNotFound(id=trigger_id) - - if trigger.has_operations(): - raise exception.DeleteTriggerNotAllowed(trigger_id=trigger_id) - - trigger.shutdown() - del self._trigger_obj_map[trigger_id] - - def update_trigger(self, trigger_id, trigger_property): - trigger = self._trigger_obj_map.get(trigger_id, None) - if not trigger: - raise exception.TriggerNotFound(id=trigger_id) - - trigger.update_trigger_property(trigger_property) - - def register_operation(self, trigger_id, operation_id, **kwargs): - """Register operation definition. 
-
-        :param trigger_id: the ID of the trigger to which the operation
-                           is registered
-        :param operation_id: ID of the operation
-        :param kwargs: any parameters
-        :raise InvalidInput: if the trigger_type is invalid, or any other
-            exception raised by the trigger's register_operation
-        """
-        trigger = self._trigger_obj_map.get(trigger_id, None)
-        if not trigger:
-            raise exception.TriggerNotFound(id=trigger_id)
-
-        trigger.register_operation(operation_id, **kwargs)
-
-        if kwargs.get('resume'):
-            self._executor.resume_operation(operation_id, **kwargs)
-
-    def unregister_operation(self, trigger_id, operation_id, **kwargs):
-        """Unregister an operation.
-
-        :param trigger_id: the ID of the trigger to which the operation
-                           is registered
-        :param operation_id: ID of the operation
-        :raise InvalidInput: if the trigger_type is invalid, or any other
-            exception raised by the trigger's unregister_operation
-        """
-        trigger = self._trigger_obj_map.get(trigger_id, None)
-        if not trigger:
-            raise exception.TriggerNotFound(id=trigger_id)
-
-        trigger.unregister_operation(operation_id, **kwargs)
-        self._executor.cancel_operation(operation_id)
-
-    def _get_trigger_class(self, trigger_type):
-        cls = self._trigger_cls_map.get(trigger_type, None)
-        if not cls:
-            msg = (_("Invalid trigger type: %s") % trigger_type)
-            raise exception.InvalidInput(msg)
-
-        return cls
diff --git a/karbor/services/operationengine/karbor_client.py b/karbor/services/operationengine/karbor_client.py
deleted file mode 100644
index 060f0980..00000000
--- a/karbor/services/operationengine/karbor_client.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
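TriggerManager above keeps a {TRIGGER_TYPE: class} map built from the loadable trigger classes and dispatches on it. The same pattern in isolation, with stub classes standing in for the real trigger plugins (purely illustrative):

class TimeTrigger(object):
    TRIGGER_TYPE = 'time'


class EventTrigger(object):
    TRIGGER_TYPE = 'event'


# Build the type -> class map once, from whatever classes were loaded.
_cls_map = {cls.TRIGGER_TYPE: cls for cls in (TimeTrigger, EventTrigger)}


def get_trigger_class(trigger_type):
    try:
        return _cls_map[trigger_type]
    except KeyError:
        raise ValueError('Invalid trigger type: %s' % trigger_type)


print(get_trigger_class('time').__name__)   # TimeTrigger
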
- -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import config -from karbor.common import karbor_keystone_plugin -from karborclient import client as karbor_client - -LOG = logging.getLogger(__name__) - -CONFIG_GROUP = 'karbor_client' -CONF = cfg.CONF -CONF.register_opts(config.service_client_opts, - group=CONFIG_GROUP) - - -def get_karbor_endpoint(): - try: - sc_cfg = CONF[CONFIG_GROUP] - kc_plugin = karbor_keystone_plugin.KarborKeystonePlugin() - url = kc_plugin.get_service_endpoint( - sc_cfg.service_name, sc_cfg.service_type, - sc_cfg.region_id, sc_cfg.interface) - - return url.replace("$(", "%(") - except Exception: - raise - - -def create(context, **kwargs): - endpoint = kwargs.get('endpoint') - if not endpoint: - endpoint = get_karbor_endpoint() % {"project_id": context.project_id} - - LOG.debug("Creating karbor client with url %s.", endpoint) - - sc_cfg = CONF[CONFIG_GROUP] - args = { - 'version': sc_cfg.version, - 'endpoint': endpoint, - 'token': context.auth_token, - 'cacert': sc_cfg.ca_cert_file, - 'insecure': sc_cfg.auth_insecure, - } - - return karbor_client.Client(**args) diff --git a/karbor/services/operationengine/manager.py b/karbor/services/operationengine/manager.py deleted file mode 100644 index c24427d7..00000000 --- a/karbor/services/operationengine/manager.py +++ /dev/null @@ -1,246 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
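The url.replace("$(", "%(") call in karbor_client.py above exists because Keystone catalogs may template endpoints with $(project_id)s; rewriting the prefix turns that into an ordinary %-style template that create() can fill in. In isolation (the URL below is made up):

catalog_url = 'http://karbor.example.com:8799/v1/$(project_id)s'
template = catalog_url.replace('$(', '%(')      # -> ...%(project_id)s
endpoint = template % {'project_id': 'abc123'}
print(endpoint)   # http://karbor.example.com:8799/v1/abc123
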
- -""" -OperationEngine Service -""" - -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging -from stevedore import driver as import_driver - -from karbor.common import constants -from karbor import context as karbor_context -from karbor import exception -from karbor import manager -from karbor import objects -from karbor.services.operationengine.engine.triggers import trigger_manager -from karbor.services.operationengine import operation_manager -from karbor.services.operationengine import user_trust_manager - - -LOG = logging.getLogger(__name__) - - -trigger_manager_opts = [ - cfg.StrOpt('executor', - default='green_thread', - choices=['thread_pool', 'green_thread'], - help='The name of executor which is used to run operations') -] - -cfg.CONF.register_opts(trigger_manager_opts, 'operationengine') - - -class OperationEngineManager(manager.Manager): - """karbor OperationEngine Manager.""" - - RPC_API_VERSION = '1.0' - - target = messaging.Target(version=RPC_API_VERSION) - - def __init__(self, service_name=None, - *args, **kwargs): - super(OperationEngineManager, self).__init__(*args, **kwargs) - self._service_id = None - self._trigger_manager = None - self._user_trust_manager = None - self._operation_manager = None - self._executor = None - - @property - def operation_manager(self): - if not self._operation_manager: - self._operation_manager = operation_manager.OperationManager( - self.user_trust_manager - ) - return self._operation_manager - - @property - def executor(self): - if not self._executor: - executor_cls = import_driver.DriverManager( - 'karbor.operationengine.engine.executor', - cfg.CONF.operationengine.executor).driver - self._executor = executor_cls(self.operation_manager) - return self._executor - - @property - def user_trust_manager(self): - if not self._user_trust_manager: - self._user_trust_manager = user_trust_manager.UserTrustManager() - return self._user_trust_manager - - @property - def trigger_manager(self): - if not self._trigger_manager: - self._trigger_manager = trigger_manager.TriggerManager( - self.executor - ) - return self._trigger_manager - - def init_host(self, **kwargs): - self._service_id = kwargs.get("service_id") - self._restore() - - def cleanup_host(self): - if self._trigger_manager: - self._trigger_manager.shutdown() - self._trigger_manager = None - - def _restore(self): - self._restore_triggers() - self._restore_operations() - - def _restore_triggers(self): - limit = 100 - marker = None - filters = {} - ctxt = karbor_context.get_admin_context() - while True: - triggers = objects.TriggerList.get_by_filters( - ctxt, filters, limit, marker) - if not triggers: - break - - for trigger in triggers: - self.trigger_manager.add_trigger(trigger.id, trigger.type, - trigger.properties) - if len(triggers) < limit: - break - marker = triggers[-1].id - - def _restore_operations(self): - limit = 100 - marker = None - filters = {"service_id": self._service_id, - "state": [constants.OPERATION_STATE_REGISTERED, - constants.OPERATION_STATE_RUNNING]} - columns_to_join = ['operation'] - ctxt = karbor_context.get_admin_context() - resume_states = [constants.OPERATION_STATE_RUNNING, ] - while True: - states = objects.ScheduledOperationStateList.get_by_filters( - ctxt, filters, limit, marker, columns_to_join=columns_to_join) - if not states: - break - - for state in states: - operation = state.operation - if not operation.enabled: - continue - - resume = (state.state in resume_states) - self.trigger_manager.register_operation( 
- operation.trigger_id, operation.id, - resume=resume, end_time_for_run=state.end_time_for_run) - - self.user_trust_manager.resume_operation( - operation.id, operation.user_id, - operation.project_id, state.trust_id) - if len(states) < limit: - break - marker = states[-1].id - - @messaging.expected_exceptions(exception.TriggerNotFound, - exception.InvalidInput, - exception.TriggerIsInvalid, - exception.AuthorizationFailure, - exception.ScheduledOperationExist, - exception.InvalidOperationDefinition) - def create_scheduled_operation(self, context, operation): - LOG.debug("Create scheduled operation.") - self.operation_manager.check_operation_definition( - operation.operation_type, - operation.operation_definition, - ) - - # register operation - self.trigger_manager.register_operation(operation.trigger_id, - operation.id) - trust_id = self.user_trust_manager.add_operation( - context, operation.id) - - # create ScheduledOperationState record - state_info = { - "operation_id": operation.id, - "service_id": self._service_id, - "trust_id": trust_id, - "state": constants.OPERATION_STATE_REGISTERED - } - operation_state = objects.ScheduledOperationState( - context, **state_info) - try: - operation_state.create() - except Exception: - self.trigger_manager.unregister_operation( - operation.trigger_id, operation.id) - raise - - @messaging.expected_exceptions(exception.ScheduledOperationStateNotFound, - exception.TriggerNotFound) - def delete_scheduled_operation(self, context, operation_id, trigger_id): - LOG.debug("Delete scheduled operation.") - - operation_state = objects.ScheduledOperationState.get_by_operation_id( - context, operation_id) - if constants.OPERATION_STATE_DELETED != operation_state.state: - operation_state.state = constants.OPERATION_STATE_DELETED - operation_state.save() - - self.trigger_manager.unregister_operation(trigger_id, operation_id) - self.user_trust_manager.delete_operation(context, operation_id) - - @messaging.expected_exceptions(exception.TriggerNotFound) - def suspend_scheduled_operation(self, context, operation_id, trigger_id): - LOG.debug("Suspend scheduled operation.") - self.trigger_manager.unregister_operation(trigger_id, operation_id) - - @messaging.expected_exceptions(exception.TriggerNotFound, - exception.TriggerIsInvalid) - def resume_scheduled_operation(self, context, operation_id, trigger_id): - LOG.debug("Resume scheduled operation.") - - try: - self.trigger_manager.register_operation( - trigger_id, operation_id) - except exception.ScheduledOperationExist: - pass - except Exception: - raise - - @messaging.expected_exceptions(exception.InvalidInput) - def verify_trigger(self, context, trigger): - LOG.debug('Verifying trigger (id: "%s" type: "%s")', - trigger.id, trigger.type) - self.trigger_manager.check_trigger_definition( - trigger.type, trigger.properties) - - @messaging.expected_exceptions(exception.InvalidInput) - def create_trigger(self, context, trigger): - LOG.debug('Creating trigger (id: "%s" type: "%s")', - trigger.id, trigger.type) - self.trigger_manager.add_trigger(trigger.id, trigger.type, - trigger.properties) - - @messaging.expected_exceptions(exception.TriggerNotFound, - exception.DeleteTriggerNotAllowed) - def delete_trigger(self, context, trigger_id): - LOG.debug('Deleting trigger (id: "%s")', trigger_id) - self.trigger_manager.remove_trigger(trigger_id) - - @messaging.expected_exceptions(exception.TriggerNotFound, - exception.InvalidInput) - def update_trigger(self, context, trigger): - LOG.debug('Updating trigger (id: "%s")', trigger.id) 
- self.trigger_manager.update_trigger(trigger.id, trigger.properties) diff --git a/karbor/services/operationengine/operation_manager.py b/karbor/services/operationengine/operation_manager.py deleted file mode 100644 index 32f27772..00000000 --- a/karbor/services/operationengine/operation_manager.py +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Manage all operations. -""" - -from karbor import exception -from karbor.i18n import _ -from karbor.services.operationengine import operations - - -class OperationManager(object): - """Manage all operation classes which are defined at operations dir.""" - def __init__(self, user_trust_manager): - super(OperationManager, self).__init__() - self._user_trust_manager = user_trust_manager - all_ops = operations.all_operations() - self._ops_map = {op.OPERATION_TYPE: op(self._user_trust_manager) - for op in all_ops} - - def _get_operation(self, operation_type): - if operation_type not in self._ops_map: - msg = (_("Invalid operation type: %s") % operation_type) - raise exception.InvalidInput(msg) - - return self._ops_map[operation_type] - - def check_operation_definition(self, operation_type, operation_definition): - """Check operation definition. - - :param operation_type: the type of operation - :param operation_definition: the definition of operation - :raise InvalidInput: if the operation_type is invalid or - InvalidOperationDefinition if operation_definition is invalid - """ - op = self._get_operation(operation_type) - op.check_operation_definition(operation_definition) - - def run_operation(self, operation_type, operation_definition, **kwargs): - """Run operation. - - :param operation_type: the type of operation - :param operation_definition: the definition of operation - :raise InvalidInput: if the operation_type is invalid. - """ - op = self._get_operation(operation_type) - op.run(operation_definition, **kwargs) diff --git a/karbor/services/operationengine/operations/__init__.py b/karbor/services/operationengine/operations/__init__.py deleted file mode 100644 index 56740dd3..00000000 --- a/karbor/services/operationengine/operations/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
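For context on the OperationManager removed above: it is a type-keyed registry, built once from the loadable operation classes, that dispatches check_operation_definition and run calls by the OPERATION_TYPE string. A minimal self-contained sketch of that pattern follows; ToyOperation and Registry are invented names for illustration, not karbor code.

class ToyOperation(object):
    OPERATION_TYPE = "toy"

    def run(self, operation_definition, **kwargs):
        print("running %s: %s" % (self.OPERATION_TYPE, operation_definition))


class Registry(object):
    def __init__(self, operation_classes):
        # Built once at startup, like OperationManager._ops_map.
        self._ops_map = {cls.OPERATION_TYPE: cls()
                         for cls in operation_classes}

    def get(self, operation_type):
        try:
            return self._ops_map[operation_type]
        except KeyError:
            raise ValueError("Invalid operation type: %s" % operation_type)


Registry([ToyOperation]).get("toy").run({"plan_id": "..."})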
- -""" -Operation classes -""" - -from karbor import loadables -from karbor.services.operationengine.operations import base - - -class OperationHandler(loadables.BaseLoader): - - def __init__(self): - super(OperationHandler, self).__init__(base.Operation) - - -def all_operations(): - """Get all operation classes.""" - return OperationHandler().get_all_classes() diff --git a/karbor/services/operationengine/operations/base.py b/karbor/services/operationengine/operations/base.py deleted file mode 100644 index d75ff498..00000000 --- a/karbor/services/operationengine/operations/base.py +++ /dev/null @@ -1,200 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Operation classes -""" -import abc -import six - -from abc import ABCMeta -from datetime import datetime -from datetime import timedelta -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import constants -from karbor import context -from karbor import objects -from karbor.services.operationengine import karbor_client - - -record_operation_log_executor_opts = [ - cfg.IntOpt( - 'retained_operation_log_number', - default=5, - help='The number of retained operation log') -] - -CONF = cfg.CONF -CONF.register_opts(record_operation_log_executor_opts) - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(ABCMeta) -class Operation(object): - OPERATION_TYPE = "" - - def __init__(self, user_trust_manager): - super(Operation, self).__init__() - self._user_trust_manager = user_trust_manager - self._karbor_endpoint = None - - @abc.abstractmethod - def check_operation_definition(self, operation_definition): - """Check operation definition - - :param operation_definition: the definition of operation - """ - pass - - @property - def karbor_endpoint(self): - if not self._karbor_endpoint: - self._karbor_endpoint = karbor_client.get_karbor_endpoint() - return self._karbor_endpoint - - def run(self, operation_definition, **kwargs): - param = kwargs.get('param') - operation_id = param.get('operation_id') - window = param.get('window_time') - end_time = param['expect_start_time'] + timedelta(seconds=window) - is_operation_expired = datetime.utcnow() > end_time - - if constants.OPERATION_RUN_TYPE_RESUME == param['run_type']: - log_ref = self._get_operation_log( - operation_id, constants.OPERATION_EXE_STATE_IN_PROGRESS) - - if log_ref is None or len(log_ref) > 1: - return - - if 1 == len(log_ref): - log = log_ref[0] - if is_operation_expired: - self._update_log_when_operation_finished( - log, - constants.OPERATION_EXE_STATE_DROPPED_OUT_OF_WINDOW) - else: - self._resume(operation_definition, param, log) - - self._delete_oldest_operation_log(operation_id) - return - - if is_operation_expired: - log_info = { - 'state': constants.OPERATION_EXE_STATE_DROPPED_OUT_OF_WINDOW, - 'end_time': datetime.utcnow() - } - log_ref = self._create_operation_log(param, log_info) - else: - self._execute(operation_definition, param) - - self._delete_oldest_operation_log(operation_id) - - @abc.abstractmethod - def _execute(self, 
operation_definition, param): - """Execute operation. - - :param operation_definition: the definition of operation - :param param: dict, other parameters - """ - pass - - @abc.abstractmethod - def _resume(self, operation_definition, param, log_ref): - """Resume operation. - - :param operation_definition: the definition of operation - :param param: dict, other parameters - :param log_ref: instance of ScheduledOperationLog - """ - pass - - def _create_operation_log(self, param, updated_log_info=None): - log_info = { - 'operation_id': param['operation_id'], - 'expect_start_time': param['expect_start_time'], - 'triggered_time': param['triggered_time'], - 'actual_start_time': datetime.utcnow(), - 'state': constants.OPERATION_EXE_STATE_IN_PROGRESS - } - if updated_log_info: - log_info.update(updated_log_info) - - log_ref = objects.ScheduledOperationLog(context.get_admin_context(), - **log_info) - try: - log_ref.create() - except Exception: - LOG.exception("Execute operation(%s), create log obj failed", - param['operation_id']) - return - return log_ref - - def _delete_oldest_operation_log(self, operation_id): - # delete the oldest logs to keep the number of logs - # in a reasonable range - try: - objects.ScheduledOperationLog.destroy_oldest( - context.get_admin_context(), operation_id, - CONF.retained_operation_log_number) - except Exception: - pass - - def _update_operation_log(self, log_ref, updates): - if not log_ref: - return - - for item in updates: - setattr(log_ref, item, updates.get(item)) - try: - log_ref.save() - except Exception: - LOG.exception("Execute operation(%s), save log failed", - log_ref.operation_id) - - def _update_log_when_operation_finished(self, log_ref, state, - updated_log_info=None): - if not log_ref: - return - - updates = { - 'state': state, - 'end_time': datetime.utcnow() - } - if updated_log_info: - updates.update(updated_log_info) - - self._update_operation_log(log_ref, updates) - - def _get_operation_log(self, operation_id, operation_state): - try: - logs = objects.ScheduledOperationLogList.get_by_filters( - context.get_admin_context(), - {'state': operation_state, - 'operation_id': operation_id}, limit=2) - - return logs.objects - except Exception: - pass - - def _create_karbor_client(self, user_id, project_id): - token = self._user_trust_manager.get_token(user_id, project_id) - if not token: - return None - ctx = context.get_admin_context() - ctx.auth_token = token - ctx.project_id = project_id - - karbor_url = self.karbor_endpoint % {"project_id": project_id} - return karbor_client.create(ctx, endpoint=karbor_url) diff --git a/karbor/services/operationengine/operations/protect_operation.py b/karbor/services/operationengine/operations/protect_operation.py deleted file mode 100644 index 8b1ee720..00000000 --- a/karbor/services/operationengine/operations/protect_operation.py +++ /dev/null @@ -1,83 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
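The heart of Operation.run removed above is the execution-window guard: a triggered run only executes if the worker picks it up before expect_start_time plus window_time; otherwise it is recorded as dropped out of the window. A standalone illustration of just that arithmetic, with invented timestamps:

from datetime import datetime, timedelta


def dropped_out_of_window(expect_start_time, window_seconds, now=None):
    # Mirrors the guard in Operation.run: a run may only start within
    # [expect_start_time, expect_start_time + window_seconds].
    now = now or datetime.utcnow()
    return now > expect_start_time + timedelta(seconds=window_seconds)


start = datetime(2021, 1, 1, 0, 0, 0)
assert not dropped_out_of_window(start, 900, now=datetime(2021, 1, 1, 0, 10, 0))
assert dropped_out_of_window(start, 900, now=datetime(2021, 1, 1, 0, 20, 0))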
- -from oslo_log import log as logging -from oslo_utils import uuidutils - -from karbor.common import constants -from karbor import context -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.services.operationengine.operations import base - -LOG = logging.getLogger(__name__) - - -class ProtectOperation(base.Operation): - """Protect operation.""" - - OPERATION_TYPE = "protect" - - def check_operation_definition(self, operation_definition): - provider_id = operation_definition.get("provider_id") - if not provider_id or not uuidutils.is_uuid_like(provider_id): - reason = _("Provider_id is invalid") - raise exception.InvalidOperationDefinition(reason=reason) - - plan_id = operation_definition.get("plan_id") - if not plan_id or not uuidutils.is_uuid_like(plan_id): - reason = _("Plan_id is invalid") - raise exception.InvalidOperationDefinition(reason=reason) - - plan = objects.Plan.get_by_id(context.get_admin_context(), plan_id) - if provider_id != plan.provider_id: - reason = _("Provider_id conflicts with the plan's provider_id") - raise exception.InvalidOperationDefinition(reason=reason) - - def _execute(self, operation_definition, param): - LOG.debug("_execute operation starting") - log_ref = self._create_operation_log(param) - self._run(operation_definition, param, log_ref) - - def _resume(self, operation_definition, param, log_ref): - LOG.debug("_resume operation starting") - self._run(operation_definition, param, log_ref) - - def _run(self, operation_definition, param, log_ref): - client = self._create_karbor_client( - param.get("user_id"), param.get("project_id")) - provider_id = operation_definition.get("provider_id") - plan_id = operation_definition.get("plan_id") - trigger_id = param.get("trigger_id", None) - scheduled_operation_id = param.get("scheduled_operation_id", None) - extra_info = { - 'created_by': constants.OPERATION_ENGINE, - 'trigger_id': trigger_id, - 'scheduled_operation_id': scheduled_operation_id - } - LOG.debug("Create checkpoint: provider_id=%(provider_id)s, " - "plan_id=%(plan_id)s, trigger_id=%(trigger_id)s, " - "scheduled_operation_id=%(scheduled_operation_id)s" % - {"provider_id": provider_id, - "plan_id": plan_id, - "trigger_id": trigger_id, - "scheduled_operation_id": scheduled_operation_id}) - try: - client.checkpoints.create(provider_id, plan_id, extra_info) - except Exception: - state = constants.OPERATION_EXE_STATE_FAILED - else: - state = constants.OPERATION_EXE_STATE_SUCCESS - - LOG.debug("Create checkpoint finished, state=%s" % state) - self._update_log_when_operation_finished(log_ref, state) diff --git a/karbor/services/operationengine/operations/retention_operation.py b/karbor/services/operationengine/operations/retention_operation.py deleted file mode 100644 index 990075ab..00000000 --- a/karbor/services/operationengine/operations/retention_operation.py +++ /dev/null @@ -1,228 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
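check_operation_definition above performs three shape checks before a protect operation is accepted: both ids must be UUID-like, and the definition's provider_id must match the plan's. A condensed standalone version, assuming the Plan object is passed in rather than loaded via objects.Plan.get_by_id:

from oslo_utils import uuidutils


def validate_definition(operation_definition, plan):
    # Same three checks as ProtectOperation.check_operation_definition,
    # with the Plan object passed in instead of fetched from the DB.
    provider_id = operation_definition.get("provider_id")
    if not provider_id or not uuidutils.is_uuid_like(provider_id):
        raise ValueError("provider_id is invalid")
    plan_id = operation_definition.get("plan_id")
    if not plan_id or not uuidutils.is_uuid_like(plan_id):
        raise ValueError("plan_id is invalid")
    if provider_id != plan.provider_id:
        raise ValueError("provider_id does not match the plan's provider_id")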
- -from datetime import datetime -from oslo_log import log as logging -from oslo_utils import uuidutils - -from karbor.common import constants -from karbor import context -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.services.operationengine.operations import base - -LOG = logging.getLogger(__name__) - - -class RetentionProtectOperation(base.Operation): - """Retention protect operation.""" - - OPERATION_TYPE = "retention_protect" - - def check_operation_definition(self, operation_definition): - provider_id = operation_definition.get("provider_id") - if not provider_id or not uuidutils.is_uuid_like(provider_id): - reason = _("Provider_id is invalid") - raise exception.InvalidOperationDefinition(reason=reason) - - plan_id = operation_definition.get("plan_id") - if not plan_id or not uuidutils.is_uuid_like(plan_id): - reason = _("Plan_id is invalid") - raise exception.InvalidOperationDefinition(reason=reason) - - plan = objects.Plan.get_by_id(context.get_admin_context(), plan_id) - if provider_id != plan.provider_id: - reason = _("Provider_id conflicts with the plan's provider_id") - raise exception.InvalidOperationDefinition(reason=reason) - - def _execute(self, operation_definition, param): - LOG.debug("_execute operation starting") - log_ref = self._create_operation_log(param) - self._run(operation_definition, param, log_ref) - - def _resume(self, operation_definition, param, log_ref): - LOG.debug("_resume operation starting") - self._run(operation_definition, param, log_ref) - - def _run(self, operation_definition, param, log_ref): - project_id = param.get("project_id") - client = self._create_karbor_client( - param.get("user_id"), project_id) - provider_id = operation_definition.get("provider_id") - plan_id = operation_definition.get("plan_id") - trigger_id = param.get("trigger_id", None) - scheduled_operation_id = param.get("scheduled_operation_id", None) - extra_info = { - 'created_by': constants.OPERATION_ENGINE, - 'trigger_id': trigger_id, - 'scheduled_operation_id': scheduled_operation_id - } - LOG.debug("Create checkpoint: provider_id=%(provider_id)s, " - "plan_id=%(plan_id)s, trigger_id=%(trigger_id)s, " - "scheduled_operation_id=%(scheduled_operation_id)s" % - {"provider_id": provider_id, - "plan_id": plan_id, - "trigger_id": trigger_id, - "scheduled_operation_id": scheduled_operation_id}) - try: - client.checkpoints.create(provider_id, plan_id, extra_info) - except Exception: - state = constants.OPERATION_EXE_STATE_FAILED - else: - state = constants.OPERATION_EXE_STATE_SUCCESS - - finally: - LOG.debug("Create checkpoint finished, state=%s" % state) - self._update_log_when_operation_finished(log_ref, state) - - try: - max_backups = int(operation_definition.get("max_backups", -1)) - max_backups = -1 if max_backups <= 0 else max_backups - except Exception: - state = constants.OPERATION_GET_MAX_BACKUP_STATE_FAILED - self._update_log_when_operation_finished(log_ref, state) - reason = _("Failed to get max_backups") - raise exception.InvalidOperationDefinition(reason=reason) - - try: - retention_duration = int(operation_definition.get( - "retention_duration", -1)) - retention_duration = -1 if retention_duration <= 0\ - else retention_duration - except Exception: - state = constants.OPERATION_GET_DURATION_STATE_FAILED - self._update_log_when_operation_finished(log_ref, state) - reason = _("Failed to get retention_duration") - raise exception.InvalidOperationDefinition(reason=reason) - - try: - self._delete_old_backup_by_max_backups( - client, max_backups, project_id,
provider_id, plan_id) - state = constants.OPERATION_EXE_MAX_BACKUP_STATE_SUCCESS - except Exception: - state = constants.OPERATION_EXE_MAX_BACKUP_STATE_FAILED - reason = (_("Can't execute retention policy provider_id: " - "%(provider_id)s plan_id:%(plan_id)s" - " max_backups:%(max_backups)s") % - {"provider_id": provider_id, "plan_id": plan_id, - "max_backups": max_backups}) - raise exception.InvalidOperationDefinition(reason=reason) - finally: - LOG.debug("Delete old backup by max_backups finished, " - "state=%(state)s, max_backups:%(max_backups)s" % - {"state": state, "max_backups": max_backups}) - self._update_log_when_operation_finished(log_ref, state) - - try: - self._delete_old_backup_by_duration( - client, retention_duration, project_id, provider_id, plan_id) - state = constants.OPERATION_EXE_DURATION_STATE_SUCCESS - except Exception: - state = constants.OPERATION_EXE_DURATION_STATE_FAILED - reason = (_("Can't execute retention policy provider_id: " - "%(provider_id)s plan_id:%(plan_id)s" - " retention_duration:%(retention_duration)s") % - {"provider_id": provider_id, "plan_id": plan_id, - "retention_duration": retention_duration}) - raise exception.InvalidOperationDefinition(reason=reason) - finally: - LOG.debug("Delete old backup by duration finished, " - "state=%(state)s, " - "retention_duration:%(retention_duration)s" % - {"state": state, - "retention_duration": retention_duration}) - self._update_log_when_operation_finished(log_ref, state) - - @staticmethod - def _list_available_checkpoint(client, project_id, - provider_id, plan_id): - search_opts = {'project_id': project_id, - 'plan_id': plan_id, - "status": constants.CHECKPOINT_STATUS_AVAILABLE - } - sort = {"created_at": "desc"} - try: - checkpoints = client.checkpoints.list( - provider_id=provider_id, - search_opts=search_opts, - limit=None, - sort=sort) - avi_check = [x for x in checkpoints if x.status == - constants.CHECKPOINT_STATUS_AVAILABLE] - except Exception as e: - reason = (_("Failed to list checkpoint by %(provider_id)s" - "and %(plan_id)s reason: %(reason)s") % - {"provider_id": provider_id, - "plan_id": plan_id, "reason": e}) - raise exception.InvalidOperationDefinition(reason=reason) - - return avi_check - - def _delete_old_backup_by_max_backups( - self, client, max_backups, project_id, provider_id, plan_id): - - if max_backups == -1: - return - - backup_items = self._list_available_checkpoint( - client, project_id, provider_id, plan_id) - - LOG.debug("Delete checkpoint: max_backups=%(max_backups)s, " - "project_id=%(project_id)s, provider_id=%(provider_id)s, " - "plan_id=%(plan_id)s" % - {"max_backups": max_backups, - "project_id": project_id, - "provider_id": provider_id, - "plan_id": plan_id}) - count = len(backup_items) - if count > max_backups: - for item in backup_items[max_backups:]: - try: - client.checkpoints.delete(provider_id, item.id) - except Exception as e: - reason = (_("Failed to delete checkpoint: %(cp_id)s by " - "max_backups with the reason: %(reason)s") % - {"cp_id": item.id, "reason": e}) - raise exception.InvalidOperationDefinition(reason=reason) - - def _delete_old_backup_by_duration( - self, client, retention_duration, - project_id, provider_id, plan_id): - - if retention_duration == -1: - return - - backup_items = self._list_available_checkpoint( - client, project_id, provider_id, plan_id) - - LOG.debug("Delete checkpoint: " - "retention_duration=%(retention_duration)s, " - "project_id=%(project_id)s, provider_id=%(provider_id)s, " - "plan_id=%(plan_id)s" % - {"retention_duration": 
retention_duration, - "project_id": project_id, - "provider_id": provider_id, - "plan_id": plan_id}) - now = datetime.utcnow() - for item in backup_items: - created_at = datetime.strptime(item.created_at, "%Y-%m-%d") - interval = (now - created_at).days - if interval > retention_duration: - try: - client.checkpoints.delete(provider_id, item.id) - except Exception as e: - reason = (_("Failed to delete checkpoint: %(cp_id)s " - "by retention_duration with the reason: " - "%(reason)s") % - {"cp_id": item.id, "reason": e}) - raise exception.InvalidOperationDefinition(reason=reason) diff --git a/karbor/services/operationengine/rpcapi.py b/karbor/services/operationengine/rpcapi.py deleted file mode 100644 index 8db48845..00000000 --- a/karbor/services/operationengine/rpcapi.py +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Client side of the OperationEngine manager RPC API. -""" - -from oslo_config import cfg -import oslo_messaging as messaging - -from karbor.objects import base as objects_base -from karbor import rpc - - -CONF = cfg.CONF - - -class OperationEngineAPI(object): - """Client side of the OperationEngine rpc API. - - API version history: - - 1.0 - Initial version. - """ - - RPC_API_VERSION = '1.0' - - def __init__(self): - super(OperationEngineAPI, self).__init__() - target = messaging.Target(topic=CONF.operationengine_topic, - version=self.RPC_API_VERSION) - serializer = objects_base.KarborObjectSerializer() - - client = rpc.get_client(target, version_cap=None, - serializer=serializer) - self._client = client.prepare(version='1.0') - - def create_scheduled_operation(self, ctxt, operation): - return self._client.call(ctxt, 'create_scheduled_operation', - operation=operation) - - def delete_scheduled_operation(self, ctxt, operation_id, trigger_id): - return self._client.call(ctxt, 'delete_scheduled_operation', - operation_id=operation_id, - trigger_id=trigger_id) - - def suspend_scheduled_operation(self, ctxt, operation_id, trigger_id): - return self._client.call(ctxt, 'suspend_scheduled_operation', - operation_id=operation_id, - trigger_id=trigger_id) - - def resume_scheduled_operation(self, ctxt, operation_id, trigger_id): - return self._client.call(ctxt, 'resume_scheduled_operation', - operation_id=operation_id, - trigger_id=trigger_id) - - def verify_trigger(self, ctxt, trigger): - return self._client.call(ctxt, 'verify_trigger', trigger=trigger) - - def create_trigger(self, ctxt, trigger): - self._client.prepare(fanout=True).cast(ctxt, 'create_trigger', - trigger=trigger) - - def delete_trigger(self, ctxt, trigger_id): - self._client.prepare(fanout=True).cast(ctxt, 'delete_trigger', - trigger_id=trigger_id) - - def update_trigger(self, ctxt, trigger): - self._client.prepare(fanout=True).cast(ctxt, 'update_trigger', - trigger=trigger) diff --git a/karbor/services/operationengine/user_trust_manager.py b/karbor/services/operationengine/user_trust_manager.py deleted file mode 100644 index a9fea1cd..00000000 ---
a/karbor/services/operationengine/user_trust_manager.py +++ /dev/null @@ -1,103 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from karbor.common import karbor_keystone_plugin - - -LOG = logging.getLogger(__name__) - - -class UserTrustManager(object): - def __init__(self): - super(UserTrustManager, self).__init__() - self._user_trust_map = {} - self._skp = karbor_keystone_plugin.KarborKeystonePlugin() - - def _user_trust_key(self, user_id, project_id): - return "%s_%s" % (user_id, project_id) - - def _add_user_trust_info(self, user_id, project_id, - operation_id, trust_id, session): - key = self._user_trust_key(user_id, project_id) - self._user_trust_map[key] = { - 'operation_ids': {operation_id}, - 'trust_id': trust_id, - 'session': session - } - - def _get_user_trust_info(self, user_id, project_id): - return self._user_trust_map.get( - self._user_trust_key(user_id, project_id)) - - def _del_user_trust_info(self, user_id, project_id): - key = self._user_trust_key(user_id, project_id) - del self._user_trust_map[key] - - def get_token(self, user_id, project_id): - auth_info = self._get_user_trust_info(user_id, project_id) - if not auth_info: - return None - - try: - return auth_info['session'].get_token() - except Exception: - LOG.exception("Get token failed, user_id=%(user_id)s, " - "project_id=%(proj_id)s", - {'user_id': user_id, 'proj_id': project_id}) - return None - - def add_operation(self, context, operation_id): - auth_info = self._get_user_trust_info( - context.user_id, context.project_id) - if auth_info: - auth_info['operation_ids'].add(operation_id) - return auth_info['trust_id'] - - trust_id = self._skp.create_trust_to_karbor(context) - try: - lsession = self._skp.create_trust_session(trust_id) - except Exception: - self._skp.delete_trust_to_karbor(trust_id) - raise - - self._add_user_trust_info(context.user_id, context.project_id, - operation_id, trust_id, lsession) - - return trust_id - - def delete_operation(self, context, operation_id): - auth_info = self._get_user_trust_info( - context.user_id, context.project_id) - if not auth_info: - return - - operation_ids = auth_info['operation_ids'] - operation_ids.discard(operation_id) - if len(operation_ids) == 0: - self._skp.delete_trust_to_karbor(auth_info['trust_id']) - self._del_user_trust_info(context.user_id, context.project_id) - - def resume_operation(self, operation_id, user_id, project_id, trust_id): - auth_info = self._get_user_trust_info(user_id, project_id) - if auth_info: - auth_info['operation_ids'].add(operation_id) - return - - try: - lsession = self._skp.create_trust_session(trust_id) - except Exception: - raise - - self._add_user_trust_info(user_id, project_id, - operation_id, trust_id, lsession) diff --git a/karbor/services/protection/__init__.py b/karbor/services/protection/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/api.py b/karbor/services/protection/api.py deleted file mode 100644 index 89b67826..00000000 --- 
a/karbor/services/protection/api.py +++ /dev/null @@ -1,132 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Handles all requests relating to protection service.""" - - -from karbor.db import base -from karbor.services.protection import rpcapi as protection_rpcapi - - -class API(base.Base): - """API for interacting with the protection manager.""" - - def __init__(self, db_driver=None): - self.protection_rpcapi = protection_rpcapi.ProtectionAPI() - super(API, self).__init__(db_driver) - - def restore(self, context, restore, restore_auth): - return self.protection_rpcapi.restore(context, restore, restore_auth) - - def verification(self, context, verification): - return self.protection_rpcapi.verification(context, verification) - - def protect(self, context, plan, checkpoint_properties): - return self.protection_rpcapi.protect(context, plan, - checkpoint_properties) - - def copy(self, context, plan): - return self.protection_rpcapi.copy(context, plan) - - def delete(self, context, provider_id, checkpoint_id): - return self.protection_rpcapi.delete( - context, - provider_id, - checkpoint_id - ) - - def reset_state(self, context, provider_id, checkpoint_id, state): - return self.protection_rpcapi.reset_state( - context, - provider_id, - checkpoint_id, - state - ) - - def show_checkpoint(self, context, provider_id, checkpoint_id): - return self.protection_rpcapi.show_checkpoint( - context, - provider_id, - checkpoint_id - ) - - def list_checkpoints(self, context, provider_id, marker, limit, - sort_keys, sort_dirs, filters, offset, all_tenants): - return self.protection_rpcapi.list_checkpoints( - context, - provider_id, - marker, - limit, - sort_keys, - sort_dirs, - filters, - all_tenants - ) - - def list_protectable_types(self, context): - return self.protection_rpcapi.list_protectable_types(context) - - def show_protectable_type(self, context, protectable_type): - return self.protection_rpcapi.show_protectable_type( - context, - protectable_type - ) - - def list_protectable_instances(self, context, protectable_type, - marker, limit, sort_keys, - sort_dirs, filters, offset, parameters): - return self.protection_rpcapi.list_protectable_instances( - context, - protectable_type, - marker, - limit, - sort_keys, - sort_dirs, - filters, - parameters - ) - - def list_protectable_dependents(self, context, - protectable_id, - protectable_type, - protectable_name): - return self.protection_rpcapi.list_protectable_dependents( - context, - protectable_id, - protectable_type, - protectable_name - ) - - def show_protectable_instance(self, context, - protectable_type, - protectable_id, - parameters=None): - return self.protection_rpcapi.show_protectable_instance( - context, - protectable_type, - protectable_id, - parameters=parameters - ) - - def show_provider(self, context, provider_id): - return self.protection_rpcapi.show_provider(context, provider_id) - - def list_providers(self, context, marker, limit, - sort_keys, sort_dirs, filters, offset): - return self.protection_rpcapi.list_providers( - 
context, - marker, - limit, - sort_keys, - sort_dirs, - filters - ) diff --git a/karbor/services/protection/bank_plugin.py b/karbor/services/protection/bank_plugin.py deleted file mode 100644 index 82b0a5ba..00000000 --- a/karbor/services/protection/bank_plugin.py +++ /dev/null @@ -1,305 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import os -import re -import six - -from karbor import exception -from karbor.i18n import _ - - -@six.add_metaclass(abc.ABCMeta) -class LeasePlugin(object): - @abc.abstractmethod - def acquire_lease(self): - pass - - @abc.abstractmethod - def renew_lease(self): - pass - - @abc.abstractmethod - def check_lease_validity(self): - pass - - -@six.add_metaclass(abc.ABCMeta) -class BankPlugin(object): - def __init__(self, config=None): - super(BankPlugin, self).__init__() - self._config = config - - @abc.abstractmethod - def update_object(self, key, value, context=None): - return - - @abc.abstractmethod - def get_object(self, key, context=None): - return - - @abc.abstractmethod - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - return - - @abc.abstractmethod - def delete_object(self, key, context=None): - return - - @abc.abstractmethod - def get_owner_id(self, context=None): - return - - -def validate_key(key): - pass - - -def validate_dir(key): - pass - - -class Bank(object): - _KEY_VALIDATION = re.compile(r'^[A-Za-z0-9/_.\-@]+(? self.obj_size: - return '' - return self.bank_section.get_object(self.sorted_objects[obj_index]) diff --git a/karbor/services/protection/bank_plugins/__init__.py b/karbor/services/protection/bank_plugins/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/bank_plugins/file_system_bank_plugin.py b/karbor/services/protection/bank_plugins/file_system_bank_plugin.py deleted file mode 100644 index 9fe0815d..00000000 --- a/karbor/services/protection/bank_plugins/file_system_bank_plugin.py +++ /dev/null @@ -1,187 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
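The BankPlugin interface deleted above is small enough to fake in unit tests. Below is a minimal in-memory stand-in implementing the same five methods; the class is illustrative only and was never part of karbor (the marker argument is accepted but ignored for brevity):

class InMemoryBank(object):
    """Toy stand-in for a BankPlugin implementation (illustrative only)."""

    def __init__(self):
        self._store = {}
        self._owner_id = "in-memory-owner"

    def update_object(self, key, value, context=None):
        self._store[key] = value

    def get_object(self, key, context=None):
        return self._store[key]

    def list_objects(self, prefix=None, limit=None, marker=None,
                     sort_dir=None, context=None):
        # marker-based paging is omitted in this toy version
        keys = sorted(k for k in self._store
                      if prefix is None or k.startswith(prefix))
        if sort_dir == "desc":
            keys.reverse()
        return keys[:limit] if limit is not None else keys

    def delete_object(self, key, context=None):
        del self._store[key]

    def get_owner_id(self, context=None):
        return self._owner_id


bank = InMemoryBank()
bank.update_object("/checkpoints/cp-1/index.json", {"status": "protecting"})
assert bank.list_objects(prefix="/checkpoints/") == ["/checkpoints/cp-1/index.json"]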
-import errno -import os - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from karbor import exception -from karbor.i18n import _ -from karbor.services.protection.bank_plugin import BankPlugin - -import six - -file_system_bank_plugin_opts = [ - cfg.StrOpt('file_system_bank_path', - help='The file system bank path to use.'), - cfg.StrOpt('bank_object_container', - default='karbor', - help='The file system bank container to use.'), -] - -LOG = logging.getLogger(__name__) - - -class FileSystemBankPlugin(BankPlugin): - """File system bank plugin""" - def __init__(self, config): - super(FileSystemBankPlugin, self).__init__(config) - self._config.register_opts(file_system_bank_plugin_opts, - "file_system_bank_plugin") - plugin_cfg = self._config.file_system_bank_plugin - self.file_system_bank_path = plugin_cfg.file_system_bank_path - self.bank_object_container = plugin_cfg.bank_object_container - - try: - self._create_dir(self.file_system_bank_path) - self.object_container_path = "/".join([self.file_system_bank_path, - self.bank_object_container]) - self._create_dir(self.object_container_path) - except OSError as err: - LOG.exception(_("Init file system bank failed. err: %s"), err) - - self.owner_id = uuidutils.generate_uuid() - - def _validate_path(self, path): - if path.find('..') >= 0: - msg = (_("The path(%s) is invalid.") % path) - raise exception.InvalidInput(msg) - - def _create_dir(self, path): - try: - original_umask = None - try: - original_umask = os.umask(0o022) - os.makedirs(path) - finally: - os.umask(original_umask) - except OSError as err: - if err.errno == errno.EEXIST and os.path.isdir(path): - pass - else: - LOG.exception(_("Create the directory failed. path: %s"), path) - raise - - def _write_object(self, path, data): - obj_file_name = None - try: - obj_path = self.object_container_path + path.rsplit('/', 1)[0] - obj_file_name = self.object_container_path + path - self._create_dir(obj_path) - mode = "wb" - if isinstance(data, six.string_types): - mode = "w" - with open(obj_file_name, mode=mode) as obj_file: - obj_file.write(data) - except (OSError, IOError): - LOG.exception(_("Write object failed. name: %s"), obj_file_name) - raise - - def _get_object(self, path): - obj_file_name = self.object_container_path + path - if not os.path.isfile(obj_file_name): - LOG.exception(_("Object is not a file. name: %s"), obj_file_name) - raise OSError("Object is not a file") - try: - with open(obj_file_name, mode='r') as obj_file: - data = obj_file.read() - return data - except OSError: - LOG.exception(_("Get object failed. name: %s"), obj_file_name) - raise - - def _delete_object(self, path): - obj_path = self.object_container_path + path.rsplit('/', 1)[0] - obj_file_name = self.object_container_path + path - try: - os.remove(obj_file_name) - if not os.listdir(obj_path) and ( - obj_path != self.object_container_path): - os.rmdir(obj_path) - except OSError: - LOG.exception(_("Delete the object failed. name: %s"), - obj_file_name) - raise - - def _list_object(self, path): - obj_file_path = self.object_container_path + path - if not os.path.isdir(obj_file_path): - LOG.debug(_("Path is not a directory. name: %s"), obj_file_path) - return () - try: - file_list = [] - for root, sub_dirs, files in os.walk(obj_file_path): - for file_path in files: - file_list.append(os.path.join(root, file_path)) - return file_list - except OSError: - LOG.exception(_("List the object failed. 
path: %s"), obj_file_path) - raise - - def get_owner_id(self, context=None): - return self.owner_id - - def update_object(self, key, value, context=None): - LOG.debug("FsBank: update_object. key: %s", key) - self._validate_path(key) - try: - if not isinstance(value, str): - value = jsonutils.dumps(value) - self._write_object(path=key, - data=value) - except OSError as err: - LOG.error("Update object failed. err: %s", err) - raise exception.BankUpdateObjectFailed(reason=err, - key=key) - - def delete_object(self, key, context=None): - LOG.debug("FsBank: delete_object. key: %s", key) - self._validate_path(key) - try: - self._delete_object(path=key) - except OSError as err: - LOG.error("Delete object failed. err: %s", err) - raise exception.BankDeleteObjectFailed(reason=err, - key=key) - - def get_object(self, key, context=None): - LOG.debug("FsBank: get_object. key: %s", key) - self._validate_path(key) - try: - data = self._get_object(path=key) - except OSError as err: - LOG.error("Get object failed. err: %s", err) - raise exception.BankGetObjectFailed(reason=err, - key=key) - if isinstance(data, six.string_types): - try: - data = jsonutils.loads(data) - except ValueError: - pass - return data - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - LOG.debug("FsBank: list_objects. key: %s", prefix) - try: - file_lists = self._list_object(prefix) - except OSError as err: - LOG.error("List objects failed. err: %s", err) - raise exception.BankListObjectsFailed(reason=err) - else: - container_path_length = len(self.object_container_path) - file_lists = [( - file_name[container_path_length:]) for file_name in file_lists] - return file_lists[-limit:] if limit is not None else file_lists diff --git a/karbor/services/protection/bank_plugins/s3_bank_plugin.py b/karbor/services/protection/bank_plugins/s3_bank_plugin.py deleted file mode 100644 index 753823e8..00000000 --- a/karbor/services/protection/bank_plugins/s3_bank_plugin.py +++ /dev/null @@ -1,256 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging as log -import math -import time - -from botocore.exceptions import ClientError -from karbor import exception -from karbor.i18n import _ -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import LeasePlugin -from karbor.services.protection import client_factory -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_service import loopingcall -from oslo_utils import uuidutils - -s3_bank_plugin_opts = [ - cfg.StrOpt('bank_s3_object_bucket', - default='karbor', - help='The default s3 object bucket to use.'), - cfg.StrOpt('bank_s3_lease_bucket', - default='lease', - help='The default s3 lease bucket to use.'), -] - -LOG = logging.getLogger(__name__) -log.getLogger('botocore').setLevel(log.WARNING) - -lease_opt = [cfg.IntOpt('lease_expire_window', - default=600, - help='expired_window for bank lease, in seconds'), - cfg.IntOpt('lease_renew_window', - default=120, - help='period for bank lease, in seconds, ' - 'between bank lease client renew the lease'), - cfg.IntOpt('lease_validity_window', - default=100, - help='validity_window for bank lease, in seconds'), ] - - -class S3ConnectionFailed(exception.KarborException): - message = _("Connection to s3 failed: %(reason)s") - - -class S3BankPlugin(BankPlugin, LeasePlugin): - """S3 bank plugin""" - def __init__(self, config, context=None): - super(S3BankPlugin, self).__init__(config) - self._config.register_opts(s3_bank_plugin_opts, - "s3_bank_plugin") - self._config.register_opts(lease_opt, - "s3_bank_plugin") - plugin_cfg = self._config.s3_bank_plugin - self.bank_object_bucket = plugin_cfg.bank_s3_object_bucket - self.lease_expire_window = plugin_cfg.lease_expire_window - self.lease_renew_window = plugin_cfg.lease_renew_window - self.context = context - self.lease_validity_window = plugin_cfg.lease_validity_window - - self.owner_id = uuidutils.generate_uuid() - self.lease_expire_time = 0 - self.bank_leases_bucket = plugin_cfg.bank_s3_lease_bucket - self._connection = None - - def _setup_connection(self): - return client_factory.ClientFactory.create_client( - 's3', - self.context, - self._config - ) - - @property - def connection(self): - if not self._connection: - _connection = self._setup_connection() - # create bucket - try: - _connection.create_bucket(Bucket=self.bank_object_bucket) - _connection.create_bucket(Bucket=self.bank_leases_bucket) - except S3ConnectionFailed as err: - LOG.error("bank plugin create bucket failed.") - raise exception.CreateBucketrFailed(reason=err) - self._connection = _connection - - # acquire lease - try: - self.acquire_lease() - except exception.AcquireLeaseFailed: - LOG.error("bank plugin acquire lease failed.") - raise - - # start renew lease - renew_lease_loop = loopingcall.FixedIntervalLoopingCall( - self.renew_lease - ) - renew_lease_loop.start( - interval=self.lease_renew_window, - initial_delay=self.lease_renew_window - ) - return self._connection - - def get_owner_id(self, context=None): - return self.owner_id - - def update_object(self, key, value, context=None): - serialized = False - try: - if not isinstance(value, str): - value = jsonutils.dumps(value) - serialized = True - self._put_object(bucket=self.bank_object_bucket, - obj=key, - contents=value, - headers={ - 'x-object-meta-serialized': str(serialized) - }) - except S3ConnectionFailed as err: - LOG.error("update object failed, err: %s.", err) - raise exception.BankUpdateObjectFailed(reason=err, key=key) - - def 
delete_object(self, key, context=None): - try: - self._delete_object(bucket=self.bank_object_bucket, - obj=key) - except S3ConnectionFailed as err: - LOG.error("delete object failed, err: %s.", err) - raise exception.BankDeleteObjectFailed(reason=err, key=key) - - def get_object(self, key, context=None): - try: - return self._get_object(bucket=self.bank_object_bucket, - obj=key) - except S3ConnectionFailed as err: - LOG.error("get object failed, err: %s.", err) - raise exception.BankGetObjectFailed(reason=err, key=key) - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - try: - response = self._get_bucket( - bucket=self.bank_object_bucket, - prefix=prefix, - limit=limit, - marker=marker - ) - return [obj['Key'] for obj in response] - except S3ConnectionFailed as err: - LOG.error("list objects failed, err: %s.", err) - raise exception.BankListObjectsFailed(reason=err) - - def acquire_lease(self): - bucket = self.bank_leases_bucket - obj = self.owner_id - contents = self.owner_id - self.lease_expire_time = \ - math.floor(time.time()) + self.lease_expire_window - headers = {'lease-expire-time': str(self.lease_expire_time)} - try: - self._put_object(bucket=bucket, - obj=obj, - contents=contents, - headers=headers) - except S3ConnectionFailed as err: - LOG.error("acquire lease failed, err:%s.", err) - raise exception.AcquireLeaseFailed(reason=err) - - def renew_lease(self): - self.acquire_lease() - - def check_lease_validity(self): - if (self.lease_expire_time - math.floor(time.time()) >= - self.lease_validity_window): - return True - else: - self._delete_object( - bucket=self.bank_leases_bucket, - obj=self.owner_id - ) - return False - - def _put_object(self, bucket, obj, contents, headers=None): - try: - self.connection.put_object( - Bucket=bucket, - Key=obj, - Body=contents, - Metadata=headers - ) - except ClientError as err: - raise S3ConnectionFailed(reason=err) - - def _get_object(self, bucket, obj): - try: - response = self.connection.get_object(Bucket=bucket, Key=obj) - body = response['Body'].read() - if response['Metadata']["x-object-meta-serialized"]\ - .lower() == "true": - body = jsonutils.loads(body) - return body - except ClientError as err: - raise S3ConnectionFailed(reason=err) - - def _delete_object(self, bucket, obj): - try: - self.connection.delete_object(Bucket=bucket, - Key=obj) - except ClientError as err: - raise S3ConnectionFailed(reason=err) - - def _get_bucket(self, bucket, prefix=None, limit=None, - marker=None): - try: - prefix = '' if prefix is None else prefix - marker = '' if marker is None else marker - objects_to_return = [] - if limit is None: - is_truncated = True - while is_truncated: - response = self.connection.list_objects( - Bucket=bucket, - Prefix=prefix, - Marker=marker - ) - if 'Contents' not in response: - break - - is_truncated = response['IsTruncated'] - objects_to_return.extend(response['Contents']) - marker = response['Contents'][-1]['Key'] - else: - response = self.connection.list_objects( - Bucket=bucket, - Prefix=prefix, - MaxKeys=limit, - Marker=marker - ) - - if 'Contents' in response: - objects_to_return.extend(response['Contents']) - - except ClientError as err: - raise S3ConnectionFailed(reason=err) - else: - return objects_to_return diff --git a/karbor/services/protection/bank_plugins/swift_bank_plugin.py b/karbor/services/protection/bank_plugins/swift_bank_plugin.py deleted file mode 100644 index 0086eb47..00000000 --- a/karbor/services/protection/bank_plugins/swift_bank_plugin.py +++ 
/dev/null @@ -1,253 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging as log -import math -import time - -from karbor import exception -from karbor.i18n import _ -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import LeasePlugin -from karbor.services.protection import client_factory -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_service import loopingcall -from oslo_utils import uuidutils -from swiftclient import ClientException - - -swift_bank_plugin_opts = [ - cfg.StrOpt('bank_swift_object_container', - default='karbor', - help='The default swift container to use.'), -] - -LOG = logging.getLogger(__name__) -log.getLogger('swiftclient').setLevel(log.WARNING) - -lease_opt = [cfg.IntOpt('lease_expire_window', - default=600, - help='expired_window for bank lease, in seconds'), - cfg.IntOpt('lease_renew_window', - default=120, - help='period for bank lease, in seconds, ' - 'between bank lease client renew the lease'), - cfg.IntOpt('lease_validity_window', - default=100, - help='validity_window for bank lease, in seconds'), ] - - -class SwiftConnectionFailed(exception.KarborException): - message = _("Connection to swift failed: %(reason)s") - - -class SwiftBankPlugin(BankPlugin, LeasePlugin): - """Swift bank plugin""" - def __init__(self, config, context=None): - super(SwiftBankPlugin, self).__init__(config) - self._config.register_opts(swift_bank_plugin_opts, - "swift_bank_plugin") - self._config.register_opts(lease_opt, - "swift_bank_plugin") - plugin_cfg = self._config.swift_bank_plugin - self.bank_object_container = plugin_cfg.bank_swift_object_container - self.lease_expire_window = plugin_cfg.lease_expire_window - self.lease_renew_window = plugin_cfg.lease_renew_window - self.context = context - # TODO(luobin): - # init lease_validity_window - # according to lease_renew_window if not configured - self.lease_validity_window = plugin_cfg.lease_validity_window - - # TODO(luobin): create a uuid of this bank_plugin - self.owner_id = uuidutils.generate_uuid() - self.lease_expire_time = 0 - self.bank_leases_container = "leases" - self._connection = None - - def _setup_connection(self): - return client_factory.ClientFactory.create_client('swift', - self.context, - self._config) - - @property - def connection(self): - if not self._connection: - _connection = self._setup_connection() - # create container - try: - _connection.put_container(self.bank_object_container) - _connection.put_container(self.bank_leases_container) - except SwiftConnectionFailed as err: - LOG.error("bank plugin create container failed.") - raise exception.CreateContainerFailed(reason=err) - self._connection = _connection - - # acquire lease - try: - self.acquire_lease() - except exception.AcquireLeaseFailed: - LOG.error("bank plugin acquire lease failed.") - raise - - # start renew lease - renew_lease_loop = loopingcall.FixedIntervalLoopingCall( - self.renew_lease) - 
renew_lease_loop.start(interval=self.lease_renew_window, - initial_delay=self.lease_renew_window) - return self._connection - - def get_owner_id(self, context=None): - return self.owner_id - - def update_object(self, key, value, context=None): - serialized = False - try: - if not isinstance(value, str): - value = jsonutils.dumps(value) - serialized = True - self._put_object(container=self.bank_object_container, - obj=key, - contents=value, - headers={ - 'x-object-meta-serialized': str(serialized) - }) - except SwiftConnectionFailed as err: - LOG.error("update object failed, err: %s.", err) - raise exception.BankUpdateObjectFailed(reason=err, key=key) - - def delete_object(self, key, context=None): - try: - self._delete_object(container=self.bank_object_container, - obj=key) - except SwiftConnectionFailed as err: - LOG.error("delete object failed, err: %s.", err) - raise exception.BankDeleteObjectFailed(reason=err, key=key) - - def get_object(self, key, context=None): - try: - return self._get_object(container=self.bank_object_container, - obj=key) - except SwiftConnectionFailed as err: - LOG.error("get object failed, err: %s.", err) - raise exception.BankGetObjectFailed(reason=err, key=key) - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - try: - if sort_dir == "desc": - body = self._get_container( - container=self.bank_object_container, - prefix=prefix, end_marker=marker) - return [obj.get("name") for obj in body] - else: - body = self._get_container( - container=self.bank_object_container, - prefix=prefix, limit=limit, marker=marker) - return [obj.get("name") for obj in body] - except SwiftConnectionFailed as err: - LOG.error("list objects failed, err: %s.", err) - raise exception.BankListObjectsFailed(reason=err) - - def acquire_lease(self): - container = self.bank_leases_container - obj = self.owner_id - contents = self.owner_id - headers = {'X-Delete-After': str(self.lease_expire_window)} - try: - self._put_object(container=container, - obj=obj, - contents=contents, - headers=headers) - self.lease_expire_time = math.floor( - time.time()) + self.lease_expire_window - except SwiftConnectionFailed as err: - LOG.error("acquire lease failed, err:%s.", err) - raise exception.AcquireLeaseFailed(reason=err) - - def renew_lease(self): - container = self.bank_leases_container - obj = self.owner_id - headers = {'X-Delete-After': str(self.lease_expire_window)} - try: - self._post_object(container=container, - obj=obj, - headers=headers) - self.lease_expire_time = math.floor( - time.time()) + self.lease_expire_window - except SwiftConnectionFailed as err: - LOG.error("renew lease failed, err:%s.", err) - - def check_lease_validity(self): - if (self.lease_expire_time - math.floor(time.time()) >= - self.lease_validity_window): - return True - else: - return False - - def _put_object(self, container, obj, contents, headers=None): - try: - self.connection.put_object(container=container, - obj=obj, - contents=contents, - headers=headers) - except ClientException as err: - raise SwiftConnectionFailed(reason=err) - - def _get_object(self, container, obj): - try: - (_resp, body) = self.connection.get_object(container=container, - obj=obj) - if _resp.get("x-object-meta-serialized").lower() == "true": - body = jsonutils.loads(body) - return body - except ClientException as err: - raise SwiftConnectionFailed(reason=err) - - def _post_object(self, container, obj, headers): - try: - self.connection.post_object(container=container, - obj=obj, - headers=headers)
- except ClientException as err: - raise SwiftConnectionFailed(reason=err) - - def _delete_object(self, container, obj): - try: - self.connection.delete_object(container=container, - obj=obj) - except ClientException as err: - raise SwiftConnectionFailed(reason=err) - - def _put_container(self, container): - try: - self.connection.put_container(container=container) - except ClientException as err: - raise SwiftConnectionFailed(reason=err) - - def _get_container(self, container, prefix=None, limit=None, marker=None, - end_marker=None): - full_listing = True if limit is None else False - try: - (_resp, body) = self.connection.get_container( - container=container, - prefix=prefix, - limit=limit, - marker=marker, - end_marker=end_marker, - full_listing=full_listing - ) - return body - except ClientException as err: - raise SwiftConnectionFailed(reason=err) diff --git a/karbor/services/protection/checkpoint.py b/karbor/services/protection/checkpoint.py deleted file mode 100644 index 2dee035c..00000000 --- a/karbor/services/protection/checkpoint.py +++ /dev/null @@ -1,424 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from datetime import datetime -from karbor.common import constants -from karbor import exception -from karbor.i18n import _ -from karbor.services.protection import graph -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -from oslo_utils import uuidutils - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - -_INDEX_FILE_NAME = "index.json" -_UUID_STR_LEN = 36 - - -class Checkpoint(object): - VERSION = "0.9" - SUPPORTED_VERSIONS = ["0.9"] - - def __init__(self, checkpoint_section, indices_section, - bank_lease, checkpoint_id): - super(Checkpoint, self).__init__() - self._id = checkpoint_id - self._checkpoint_section = checkpoint_section - self._indices_section = indices_section - self._bank_lease = bank_lease - self.reload_meta_data() - - def to_dict(self): - return { - "id": self.id, - "status": self.status, - "protection_plan": self.protection_plan, - "extra_info": self._md_cache.get("extra_info", None), - "project_id": self.project_id, - "resource_graph": self._md_cache.get("resource_graph", None), - "created_at": self._md_cache.get("created_at", None) - } - - @property - def checkpoint_section(self): - return self._checkpoint_section - - @property - def id(self): - return self._id - - @property - def provider_id(self): - return self._md_cache["provider_id"] - - @property - def created_at(self): - return self._md_cache["created_at"] - - @property - def status(self): - # TODO(saggi): check for valid values and transitions - return self._md_cache["status"] - - @property - def extra_info(self): - return self._md_cache["extra_info"] - - @property - def project_id(self): - return self._md_cache["project_id"] - - @property - def owner_id(self): - # TODO(yinwei): check for valid values and transitions - return self._md_cache["owner_id"] - - @property - def resource_graph(self): - serialized_resource_graph = 
self._md_cache.get("resource_graph", None) - if serialized_resource_graph is not None: - resource_graph = graph.deserialize_resource_graph( - serialized_resource_graph) - return resource_graph - else: - return None - - @property - def protection_plan(self): - return self._md_cache["protection_plan"] - - @status.setter - def status(self, value): - self._md_cache["status"] = value - - @extra_info.setter - def extra_info(self, value): - self._md_cache["extra_info"] = value - - @resource_graph.setter - def resource_graph(self, resource_graph): - serialized_resource_graph = graph.serialize_resource_graph( - resource_graph) - self._md_cache["resource_graph"] = serialized_resource_graph - - def _is_supported_version(self, version): - return version in self.SUPPORTED_VERSIONS - - def _assert_supported_version(self, new_md): - if new_md["version"] not in self.SUPPORTED_VERSIONS: - # Something bad happened invalidate the object - self._md_cache = None - self._checkpoint_section = None - raise RuntimeError( - _("Checkpoint was created in an unsupported version")) - - def reload_meta_data(self): - try: - new_md = self._checkpoint_section.get_object(_INDEX_FILE_NAME) - except exception.BankGetObjectFailed: - LOG.error("unable to reload metadata for checkpoint id: %s", - self.id) - raise exception.CheckpointNotFound(checkpoint_id=self.id) - self._assert_supported_version(new_md) - self._md_cache = new_md - - @classmethod - def _generate_id(cls): - return uuidutils.generate_uuid() - - @classmethod - def get_by_section(cls, checkpoints_section, indices_section, - bank_lease, checkpoint_id, context=None): - # TODO(yuvalbr) add validation that the checkpoint exists - checkpoint_section = checkpoints_section.get_sub_section(checkpoint_id) - return Checkpoint(checkpoint_section, indices_section, - bank_lease, checkpoint_id) - - @staticmethod - def _get_checkpoint_path_by_provider( - provider_id, project_id, timestamp, checkpoint_id): - return "/by-provider/%s/%s/%s@%s" % ( - provider_id, project_id, timestamp, checkpoint_id) - - @staticmethod - def _get_checkpoint_path_by_plan( - plan_id, project_id, created_at, timestamp, checkpoint_id): - return "/by-plan/%s/%s/%s/%s@%s" % ( - plan_id, project_id, created_at, timestamp, checkpoint_id) - - @staticmethod - def _get_checkpoint_path_by_date( - created_at, project_id, timestamp, checkpoint_id): - return "/by-date/%s/%s/%s@%s" % ( - created_at, project_id, timestamp, checkpoint_id) - - @classmethod - def create_in_section(cls, checkpoints_section, indices_section, - bank_lease, owner_id, plan, - checkpoint_id=None, checkpoint_properties=None, - context=None): - checkpoint_id = checkpoint_id or cls._generate_id() - checkpoint_section = checkpoints_section.get_sub_section(checkpoint_id) - - timestamp = timeutils.utcnow_ts() - created_at = timeutils.utcnow().strftime('%Y-%m-%d') - - provider_id = plan.get("provider_id") - project_id = plan.get("project_id") - extra_info = None - checkpoint_status = constants.CHECKPOINT_STATUS_PROTECTING - if checkpoint_properties: - extra_info = checkpoint_properties.get("extra_info", None) - status = checkpoint_properties.get("status", None) - if status: - checkpoint_status = status - checkpoint_section.update_object( - key=_INDEX_FILE_NAME, - value={ - "version": cls.VERSION, - "id": checkpoint_id, - "status": checkpoint_status, - "owner_id": owner_id, - "provider_id": provider_id, - "project_id": project_id, - "protection_plan": { - "id": plan.get("id"), - "name": plan.get("name"), - "provider_id": plan.get("provider_id"), - 
"resources": plan.get("resources") - }, - "extra_info": extra_info, - "created_at": created_at, - "timestamp": timestamp - }, - context=context - ) - - indices_section.update_object( - key=cls._get_checkpoint_path_by_provider( - provider_id, project_id, timestamp, checkpoint_id), - value=checkpoint_id, - context=context - ) - - indices_section.update_object( - key=cls._get_checkpoint_path_by_date( - created_at, project_id, timestamp, checkpoint_id), - value=checkpoint_id, - context=context - ) - - indices_section.update_object( - key=cls._get_checkpoint_path_by_plan( - plan.get("id"), project_id, created_at, timestamp, - checkpoint_id), - value=checkpoint_id, - context=context) - - return Checkpoint(checkpoint_section, - indices_section, - bank_lease, - checkpoint_id) - - def commit(self, context=None): - self._checkpoint_section.update_object( - key=_INDEX_FILE_NAME, - value=self._md_cache, - context=context - ) - - def purge(self, context=None): - """Purge the index file of the checkpoint. - - Can only be done if the checkpoint has no other files apart from the - index. - """ - all_objects = self._checkpoint_section.list_objects() - if len(all_objects) == 1 and all_objects[0] == _INDEX_FILE_NAME: - created_at = self._md_cache["created_at"] - timestamp = self._md_cache["timestamp"] - plan_id = self._md_cache["protection_plan"]["id"] - provider_id = self._md_cache["protection_plan"]["provider_id"] - project_id = self._md_cache["project_id"] - self._indices_section.delete_object( - self._get_checkpoint_path_by_provider( - provider_id, project_id, timestamp, self.id)) - self._indices_section.delete_object( - self._get_checkpoint_path_by_date( - created_at, project_id, timestamp, self.id)) - self._indices_section.delete_object( - self._get_checkpoint_path_by_plan( - plan_id, project_id, created_at, timestamp, self.id)) - - self._checkpoint_section.delete_object(_INDEX_FILE_NAME) - else: - raise RuntimeError(_("Could not delete: Checkpoint is not empty")) - - def delete(self, context=None): - self.status = constants.CHECKPOINT_STATUS_DELETED - self.commit(context=context) - # delete indices - created_at = self._md_cache["created_at"] - timestamp = self._md_cache["timestamp"] - plan_id = self._md_cache["protection_plan"]["id"] - provider_id = self._md_cache["protection_plan"]["provider_id"] - project_id = self._md_cache["project_id"] - self._indices_section.delete_object( - self._get_checkpoint_path_by_provider( - provider_id, project_id, timestamp, self.id), - context=context) - self._indices_section.delete_object( - self._get_checkpoint_path_by_date( - created_at, project_id, timestamp, self.id), - context=context) - self._indices_section.delete_object( - self._get_checkpoint_path_by_plan( - plan_id, project_id, created_at, timestamp, self.id), - context=context) - - def get_resource_bank_section(self, resource_id): - prefix = "/resource-data/%s/" % resource_id - return self._checkpoint_section.get_sub_section(prefix) - - -class CheckpointCollection(object): - - def __init__(self, bank, bank_lease=None): - super(CheckpointCollection, self).__init__() - self._bank = bank - self._bank_lease = bank_lease - self._checkpoints_section = bank.get_sub_section("/checkpoints") - self._indices_section = bank.get_sub_section("/indices") - - @staticmethod - def _get_prefix_and_marker_by_provider(provider_id, project_id, marker, - marker_checkpoint, all_tenants): - if all_tenants: - prefix = "/by-provider/%s/" % provider_id - marker = "/%s/%s" % ( - marker_checkpoint["project_id"], marker) if marker else 
marker - else: - prefix = "/by-provider/%s/%s/" % (provider_id, project_id) - marker = "/%s" % marker if marker else marker - return prefix, marker - - @staticmethod - def _get_prefix_and_marker_by_plan(plan_id, project_id, marker, - marker_checkpoint, all_tenants): - if all_tenants: - prefix = "/by-plan/%s/" % plan_id - marker = "/%s/%s/%s" % ( - marker_checkpoint["project_id"], - marker_checkpoint["created_at"], marker) if marker else marker - else: - prefix = "/by-plan/%s/%s/" % (plan_id, project_id) - marker = "/%s/%s" % ( - marker_checkpoint["created_at"], marker) if marker else marker - return prefix, marker - - @staticmethod - def _get_prefix_and_marker_by_date(project_id, marker, marker_checkpoint, - all_tenants): - prefix = "/by-date/" - if all_tenants: - marker = "/%s/%s/%s" % ( - marker_checkpoint["created_at"], - marker_checkpoint["project_id"], marker) if marker else marker - else: - marker = "/%s/%s/%s" % ( - marker_checkpoint["created_at"], project_id, - marker) if marker else marker - return prefix, marker - - def list_ids(self, project_id, provider_id, limit=None, marker=None, - plan_id=None, start_date=None, end_date=None, sort_dir=None, - context=None, all_tenants=False): - marker_checkpoint = None - if marker is not None: - checkpoint_section = self._checkpoints_section.get_sub_section( - marker) - marker_checkpoint = checkpoint_section.get_object(_INDEX_FILE_NAME) - timestamp = marker_checkpoint["timestamp"] - marker = "%s@%s" % (timestamp, marker) - - if start_date is not None: - if end_date is None: - end_date = timeutils.utcnow() - - if plan_id is None and start_date is None: - prefix, marker = self._get_prefix_and_marker_by_provider( - provider_id, project_id, marker, - marker_checkpoint, all_tenants) - elif plan_id is not None: - prefix, marker = self._get_prefix_and_marker_by_plan( - plan_id, project_id, marker, marker_checkpoint, all_tenants) - else: - prefix, marker = self._get_prefix_and_marker_by_date( - project_id, marker, marker_checkpoint, all_tenants) - - return self._list_ids(project_id, prefix, limit, marker, start_date, - end_date, sort_dir, context=context, - all_tenants=all_tenants) - - def _list_ids(self, project_id, prefix, limit, marker, start_date, - end_date, sort_dir, context=None, all_tenants=False): - if start_date is None: - return [key[key.find("@") + 1:] - for key in self._indices_section.list_objects( - prefix=prefix, - limit=limit, - marker=marker, - sort_dir=sort_dir, - context=context - )] - else: - ids = [] - for key in self._indices_section.list_objects(prefix=prefix, - marker=marker, - sort_dir=sort_dir, - context=context): - date_cursor = -2 if (prefix.find('by-plan') >= 0) else -3 - project_id_cursor = -3 if (prefix.find('by-plan') >= 0) else -2 - date = datetime.strptime( - key.split("/")[date_cursor], "%Y-%m-%d") - checkpoint_project_id = key.split("/")[project_id_cursor] - if start_date <= date <= end_date and ( - all_tenants or ( - checkpoint_project_id == project_id)): - ids.append(key[key.find("@") + 1:]) - if limit is not None and len(ids) == limit: - return ids - return ids - - def get(self, checkpoint_id, context=None): - # TODO(saggi): handle multiple instances of the same checkpoint - return Checkpoint.get_by_section(self._checkpoints_section, - self._indices_section, - self._bank_lease, - checkpoint_id, - context=context) - - def create(self, plan, checkpoint_properties=None, context=None): - # TODO(saggi): Serialize plan to checkpoint. Will be done in - # future patches. 
- return Checkpoint.create_in_section( - self._checkpoints_section, - self._indices_section, - self._bank_lease, - self._bank.get_owner_id(), - plan, - checkpoint_properties=checkpoint_properties, - context=context) diff --git a/karbor/services/protection/client_factory.py b/karbor/services/protection/client_factory.py deleted file mode 100644 index 1cf41f15..00000000 --- a/karbor/services/protection/client_factory.py +++ /dev/null @@ -1,105 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from keystoneauth1 import service_token -from keystoneauth1 import session as keystone_session - -import os -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils - -from karbor.common import karbor_keystone_plugin -from karbor import exception -from karbor.i18n import _ - -LOG = logging.getLogger(__name__) - - -class ClientFactory(object): - _factory = None - _keystone_plugin = None - - @staticmethod - def _list_clients(): - clients_dir = os.path.join(os.path.dirname(__file__), 'clients') - if not os.path.isdir(clients_dir): - LOG.error('clients directory "%s" not found', clients_dir) - return - - for file in os.listdir(clients_dir): - name, ext = os.path.splitext(file) - if name != '__init__' and name != 'utils' and ext == '.py': - LOG.debug('Found client "%s"', name) - yield '%s.clients.%s' % (__package__, name) - - @classmethod - def _generate_session(cls, context, service, privileged_user=False): - LOG.debug("Generate an auth session. 
privileged_user: %s", - privileged_user) - plugin = cls.get_keystone_plugin() - try: - if privileged_user is True: - auth_plugin = service_token.ServiceTokenAuthWrapper( - plugin.service_auth_plugin, - plugin.service_auth_plugin) - else: - auth_plugin = service_token.ServiceTokenAuthWrapper( - plugin.create_user_auth_plugin(context), - plugin.service_auth_plugin) - except Exception: - return None - - try: - client_conf = cfg.CONF['%s_client' % service] - auth_insecure = client_conf['%s_auth_insecure' % service] - ca_file = client_conf['%s_ca_cert_file' % service] - verify = False if auth_insecure else (ca_file or True) - - except Exception: - verify = True - - return keystone_session.Session(auth=auth_plugin, verify=verify) - - @classmethod - def get_keystone_plugin(cls): - if not cls._keystone_plugin: - cls._keystone_plugin = \ - karbor_keystone_plugin.KarborKeystonePlugin() - return cls._keystone_plugin - - @classmethod - def get_client_module(cls, service): - if not cls._factory: - cls._factory = {} - for client_module in cls._list_clients(): - try: - client_module = importutils.import_module(client_module) - except ImportError: - LOG.error('No module named %s', client_module) - else: - cls._factory[client_module.SERVICE] = client_module - return cls._factory.get(service) - - @classmethod - def create_client(cls, service, context, conf=cfg.CONF, - privileged_user=False, **kwargs): - module = cls.get_client_module(service) - if module is None: - raise exception.KarborException(_('Unknown service(%s)') % service) - - kwargs['privileged_user'] = privileged_user - kwargs['keystone_plugin'] = cls.get_keystone_plugin() - if context or privileged_user: - kwargs['session'] = cls._generate_session(context, service, - privileged_user) - return module.create(context, conf, **kwargs) diff --git a/karbor/services/protection/clients/__init__.py b/karbor/services/protection/clients/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/clients/cinder.py b/karbor/services/protection/clients/cinder.py deleted file mode 100644 index 1b9368b1..00000000 --- a/karbor/services/protection/clients/cinder.py +++ /dev/null @@ -1,71 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinderclient import client as cc -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import config -from karbor.services.protection.clients import utils - -LOG = logging.getLogger(__name__) - -SERVICE = "cinder" -cinder_client_opts = [ - cfg.StrOpt(SERVICE + '_endpoint', - help='URL of the cinder endpoint.'), - cfg.StrOpt(SERVICE + '_catalog_info', - default='volumev3:cinderv3:publicURL', - help='Info to match when looking for cinder in the service ' - 'catalog. 
Format is: separated values of the form: ' - '<service_type>:<service_name>:<endpoint_type> - ' - 'Only used if cinder_endpoint is unset'), - cfg.StrOpt(SERVICE + '_ca_cert_file', - help='Location of the CA certificate file ' - 'to use for client requests in SSL connections.'), - cfg.BoolOpt(SERVICE + '_auth_insecure', - default=False, - help='Bypass verification of server certificate when ' - 'making SSL connection to Cinder.'), -] - -CONFIG_GROUP = '%s_client' % SERVICE -CONF = cfg.CONF -CONF.register_opts(config.service_client_opts, group=CONFIG_GROUP) -CONF.register_opts(cinder_client_opts, group=CONFIG_GROUP) -CONF.set_default('service_name', 'cinderv3', CONFIG_GROUP) -CONF.set_default('service_type', 'volumev3', CONFIG_GROUP) - -CINDERCLIENT_VERSION = '3.43' - - -def create(context, conf, **kwargs): - conf.register_opts(cinder_client_opts, group=CONFIG_GROUP) - - client_config = conf[CONFIG_GROUP] - url = utils.get_url(SERVICE, context, client_config, - append_project_fmt='%(url)s/%(project)s', **kwargs) - LOG.debug('Creating cinder client with url %s.', url) - - if kwargs.get('session'): - return cc.Client(CINDERCLIENT_VERSION, session=kwargs.get('session'), - endpoint_override=url) - - args = { - 'project_id': context.project_id, - 'cacert': client_config.cinder_ca_cert_file, - 'insecure': client_config.cinder_auth_insecure, - } - client = cc.Client(CINDERCLIENT_VERSION, **args) - client.client.auth_token = context.auth_token - client.client.management_url = url - return client diff --git a/karbor/services/protection/clients/eisoo.py b/karbor/services/protection/clients/eisoo.py deleted file mode 100644 index ed3b7e01..00000000 --- a/karbor/services/protection/clients/eisoo.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
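(A minimal usage sketch, for orientation while reading these client modules: callers obtain a service client through the ClientFactory shown earlier. The context value stands in for a karbor RequestContext; create_client and its parameters are as defined in client_factory.py above.)

    from oslo_config import cfg

    from karbor.services.protection import client_factory

    def get_cinder_client(context):
        # 'cinder' matches the SERVICE constant of the cinder module above;
        # privileged_user=False makes the session carry the caller's token.
        return client_factory.ClientFactory.create_client(
            'cinder', context, conf=cfg.CONF, privileged_user=False)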
- -import os - -from abclient import client -from oslo_config import cfg -from oslo_log import log as logging - -from karbor import utils - -EISOO_JOB_TYPE = (ORACLE_JOB_TYPE) = (1) -EISOO_JOB_STATUS = (RUNNING, SUCCESS, FAILED) = (4, 32, 64) - -SERVICE = "eisoo" -eisoo_client_opts = [ - cfg.StrOpt(SERVICE + '_endpoint', - help='URL of the eisoo endpoint.'), - cfg.StrOpt(SERVICE + '_app_id', - help='App id for eisoo authentication.'), - cfg.StrOpt(SERVICE + '_app_secret', - secret=True, - help='App secret for eisoo authentication.'), -] - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def create(context, conf): - config_dir = utils.find_config(CONF.provider_config_dir) - config_file = os.path.abspath(os.path.join(config_dir, - 'eisoo.conf')) - config = cfg.ConfigOpts() - config(args=['--config-file=' + config_file]) - config.register_opts(eisoo_client_opts, - group=SERVICE + '_client') - - LOG.info('Creating eisoo client with url %s.', - config.eisoo_client.eisoo_endpoint) - abclient = client.ABClient(config.eisoo_client.eisoo_endpoint, - config.eisoo_client.eisoo_app_id, - config.eisoo_client.eisoo_app_secret) - - return abclient diff --git a/karbor/services/protection/clients/freezer.py b/karbor/services/protection/clients/freezer.py deleted file mode 100644 index ba7d0df4..00000000 --- a/karbor/services/protection/clients/freezer.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from freezerclient.v1 import client as freezer_client -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import config -from karbor.services.protection.clients import utils - -LOG = logging.getLogger(__name__) - -SERVICE = "freezer" -freezer_client_opts = [ - cfg.StrOpt(SERVICE + '_endpoint', - help='URL of the freezer endpoint.'), - cfg.StrOpt(SERVICE + '_catalog_info', - default='backup:freezer:publicURL', - help='Info to match when looking for freezer in the service ' - 'catalog. 
Format is: separated values of the form: ' - '<service_type>:<service_name>:<endpoint_type> - ' - 'Only used if freezer_endpoint is unset'), - cfg.StrOpt(SERVICE + '_ca_cert_file', - help='Location of the CA certificate file ' - 'to use for client requests in SSL connections.'), - cfg.BoolOpt(SERVICE + '_auth_insecure', - default=False, - help='Bypass verification of server certificate when ' - 'making SSL connection to Freezer.'), -] - -CONFIG_GROUP = '%s_client' % SERVICE -CONF = cfg.CONF -CONF.register_opts(config.service_client_opts, group=CONFIG_GROUP) -CONF.register_opts(config.keystone_client_opts, group=CONFIG_GROUP) -CONF.register_opts(freezer_client_opts, group=CONFIG_GROUP) -CONF.set_default('service_name', 'freezer', CONFIG_GROUP) -CONF.set_default('service_type', 'backup', CONFIG_GROUP) - - -def create(context, conf, **kwargs): - conf.register_opts(freezer_client_opts, group=CONFIG_GROUP) - - client_config = conf[CONFIG_GROUP] - url = utils.get_url(SERVICE, context, client_config, - append_project_fmt='%(url)s/%(project)s', **kwargs) - - if kwargs.get('session'): - return freezer_client.Client(session=kwargs.get('session'), - endpoint=url) - args = { - 'project_id': context.project_id, - 'project_name': context.project_name, - 'cacert': client_config.freezer_ca_cert_file, - 'insecure': client_config.freezer_auth_insecure, - 'endpoint': url, - 'token': context.auth_token, - 'auth_url': client_config.auth_uri - } - return freezer_client.Client(**args) diff --git a/karbor/services/protection/clients/glance.py b/karbor/services/protection/clients/glance.py deleted file mode 100644 index d44b9c57..00000000 --- a/karbor/services/protection/clients/glance.py +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from glanceclient import client as gc -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import config -from karbor.services.protection.clients import utils - -LOG = logging.getLogger(__name__) - -SERVICE = 'glance' -glance_client_opts = [ - cfg.StrOpt(SERVICE + '_endpoint', - help='URL of the glance endpoint.'), - cfg.StrOpt(SERVICE + '_catalog_info', - default='image:glance:publicURL', - help='Info to match when looking for glance in the service ' - 'catalog.
Format is: separated values of the form: ' - '<service_type>:<service_name>:<endpoint_type> - ' - 'Only used if glance_endpoint is unset'), - cfg.StrOpt(SERVICE + '_ca_cert_file', - help='Location of the CA certificate file ' - 'to use for client requests in SSL connections.'), - cfg.BoolOpt(SERVICE + '_auth_insecure', - default=False, - help='Bypass verification of server certificate when ' - 'making SSL connection to Glance.'), -] - -CONFIG_GROUP = '%s_client' % SERVICE -CONF = cfg.CONF -CONF.register_opts(config.service_client_opts, group=CONFIG_GROUP) -CONF.register_opts(glance_client_opts, group=CONFIG_GROUP) -CONF.set_default('service_name', 'glance', CONFIG_GROUP) -CONF.set_default('service_type', 'image', CONFIG_GROUP) - -GLANCECLIENT_VERSION = '2' - - -def create(context, conf, **kwargs): - conf.register_opts(glance_client_opts, group=CONFIG_GROUP) - - client_config = conf[CONFIG_GROUP] - url = utils.get_url(SERVICE, context, client_config, **kwargs) - LOG.debug("Creating glance client with url %s.", url) - - if kwargs.get('session'): - return gc.Client(GLANCECLIENT_VERSION, session=kwargs.get('session'), - endpoint=url) - - args = { - 'endpoint': url, - 'token': context.auth_token, - 'cacert': client_config.glance_ca_cert_file, - 'insecure': client_config.glance_auth_insecure, - } - return gc.Client(GLANCECLIENT_VERSION, **args) diff --git a/karbor/services/protection/clients/k8s.py b/karbor/services/protection/clients/k8s.py deleted file mode 100644 index 735bef3b..00000000 --- a/karbor/services/protection/clients/k8s.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
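(A condensed, hypothetical restatement of the branch in glance's create() above; most client modules in this directory follow the same session-or-token pattern, so this sketch applies to them as well. All parameters are stand-ins.)

    from glanceclient import client as gc

    GLANCECLIENT_VERSION = '2'

    def build_glance_client(url, session=None, token=None,
                            cacert=None, insecure=False):
        # Path 1: an authenticated keystoneauth1 session (preferred).
        if session:
            return gc.Client(GLANCECLIENT_VERSION, session=session,
                             endpoint=url)
        # Path 2: raw token auth, mirroring the args dict above.
        return gc.Client(GLANCECLIENT_VERSION, endpoint=url, token=token,
                         cacert=cacert, insecure=insecure)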
- -from kubernetes import client -from kubernetes.client import api_client -from kubernetes.client.configuration import Configuration - -from oslo_config import cfg -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) -SERVICE = 'k8s' -kubernetes_client_opts = [ - cfg.StrOpt(SERVICE + '_host', - help='The IP address of the kubernetes api server.'), - cfg.StrOpt(SERVICE + '_ssl_ca_cert', - help='The certificate authority will be used for secure ' - 'access from Admission Controllers.'), - cfg.StrOpt(SERVICE + '_cert_file', - help='The client certificate file for the kubernetes ' - 'cluster.'), - cfg.StrOpt(SERVICE + '_key_file', - help='The client key file for the kubernetes cluster.') -] - - -def register_opts(conf): - conf.register_opts(kubernetes_client_opts, group=SERVICE + '_client') - - -def create(context, conf, **kwargs): - register_opts(conf) - - client_config = conf.k8s_client - LOG.info('Creating the kubernetes client with url %s.', - client_config.k8s_host) - - config = Configuration() - config.host = client_config.k8s_host - config.ssl_ca_cert = client_config.k8s_ssl_ca_cert - config.cert_file = client_config.k8s_cert_file - config.key_file = client_config.k8s_key_file - k8s_api_client = api_client.ApiClient(config) - k8s_core_v1_api = client.CoreV1Api(k8s_api_client) - return k8s_core_v1_api diff --git a/karbor/services/protection/clients/manila.py b/karbor/services/protection/clients/manila.py deleted file mode 100644 index 6972148f..00000000 --- a/karbor/services/protection/clients/manila.py +++ /dev/null @@ -1,74 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from manilaclient import client as mc -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import config -from karbor.services.protection.clients import utils - -LOG = logging.getLogger(__name__) - -SERVICE = "manila" -manila_client_opts = [ - cfg.StrOpt(SERVICE + '_endpoint', - help='URL of the manila endpoint.'), - cfg.StrOpt(SERVICE + '_catalog_info', - default='sharev2:manilav2:publicURL', - help='Info to match when looking for manila in the service ' - 'catalog. 
Format is: separated values of the form: ' - '<service_type>:<service_name>:<endpoint_type> - ' - 'Only used if manila_endpoint is unset'), - cfg.StrOpt(SERVICE + '_ca_cert_file', - default=None, - help='Location of the CA certificate file ' - 'to use for client requests in SSL connections.'), - cfg.BoolOpt(SERVICE + '_auth_insecure', - default=False, - help='Bypass verification of server certificate when ' - 'making SSL connection to manila.'), -] - -CONFIG_GROUP = '%s_client' % SERVICE -CONF = cfg.CONF -CONF.register_opts(config.service_client_opts, group=CONFIG_GROUP) -CONF.register_opts(manila_client_opts, group=CONFIG_GROUP) -CONF.set_default('service_name', 'manilav2', CONFIG_GROUP) -CONF.set_default('service_type', 'sharev2', CONFIG_GROUP) - -MANILACLIENT_VERSION = '2' - - -def create(context, conf, **kwargs): - conf.register_opts(manila_client_opts, group=CONFIG_GROUP) - - client_config = conf[CONFIG_GROUP] - url = utils.get_url(SERVICE, context, client_config, - append_project_fmt='%(url)s/%(project)s', **kwargs) - LOG.debug('Creating manila client with url %s.', url) - - if kwargs.get('session'): - return mc.Client(MANILACLIENT_VERSION, session=kwargs.get('session'), - endpoint_override=url) - - args = { - 'input_auth_token': context.auth_token, - 'project_id': context.project_id, - 'service_catalog_url': url, - 'cacert': client_config.manila_ca_cert_file, - 'insecure': client_config.manila_auth_insecure, - } - client = mc.Client(MANILACLIENT_VERSION, **args) - client.client.auth_token = context.auth_token - client.client.management_url = url - return client diff --git a/karbor/services/protection/clients/neutron.py b/karbor/services/protection/clients/neutron.py deleted file mode 100644 index a1e693aa..00000000 --- a/karbor/services/protection/clients/neutron.py +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutronclient.v2_0 import client as neutron_client -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import config -from karbor.services.protection.clients import utils - -LOG = logging.getLogger(__name__) - -SERVICE = 'neutron' -neutron_client_opts = [ - cfg.StrOpt(SERVICE + '_endpoint', - help='URL of the neutron endpoint.'), - cfg.StrOpt(SERVICE + '_catalog_info', - default='network:neutron:publicURL', - help='Info to match when looking for neutron in the service ' - 'catalog.
Format is: separated values of the form: ' - '<service_type>:<service_name>:<endpoint_type> - ' - 'Only used if neutron_endpoint is unset'), - cfg.StrOpt(SERVICE + '_ca_cert_file', - help='Location of the CA certificate file ' - 'to use for client requests in SSL connections.'), - cfg.BoolOpt(SERVICE + '_auth_insecure', - default=False, - help='Bypass verification of server certificate when ' - 'making SSL connection to Neutron.'), -] - -CONFIG_GROUP = '%s_client' % SERVICE -CONF = cfg.CONF -CONF.register_opts(config.service_client_opts, group=CONFIG_GROUP) -CONF.register_opts(neutron_client_opts, group=CONFIG_GROUP) -CONF.set_default('service_name', 'neutron', CONFIG_GROUP) -CONF.set_default('service_type', 'network', CONFIG_GROUP) - - -def create(context, conf, **kwargs): - conf.register_opts(neutron_client_opts, group=CONFIG_GROUP) - - client_config = conf[CONFIG_GROUP] - url = utils.get_url(SERVICE, context, client_config, **kwargs) - LOG.debug("Creating neutron client with url %s.", url) - - if kwargs.get('session'): - return neutron_client.Client(session=kwargs.get('session'), - endpoint_override=url) - - args = { - 'endpoint_url': url, - 'token': context.auth_token, - 'cacert': client_config.neutron_ca_cert_file, - 'insecure': client_config.neutron_auth_insecure, - } - return neutron_client.Client(**args) diff --git a/karbor/services/protection/clients/nova.py b/karbor/services/protection/clients/nova.py deleted file mode 100644 index 7a0b1023..00000000 --- a/karbor/services/protection/clients/nova.py +++ /dev/null @@ -1,71 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging as log -from novaclient import client as nc -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import config -from karbor import exception -from karbor.services.protection.clients import utils - -LOG = logging.getLogger(__name__) - -SERVICE = "nova" -nova_client_opts = [ - cfg.StrOpt(SERVICE + '_endpoint', - help='URL of the nova endpoint. ' - ''), - cfg.StrOpt(SERVICE + '_catalog_info', - default='compute:nova:publicURL', - help='Info to match when looking for nova in the service ' - 'catalog.
Format is: separated values of the form: ' - '<service_type>:<service_name>:<endpoint_type> - ' - 'Only used if nova_endpoint is unset'), - cfg.StrOpt(SERVICE + '_ca_cert_file', - help='Location of the CA certificate file ' - 'to use for client requests in SSL connections.'), - cfg.BoolOpt(SERVICE + '_auth_insecure', - default=False, - help='Bypass verification of server certificate when ' - 'making SSL connection to Nova.'), -] - -CONFIG_GROUP = '%s_client' % SERVICE -CONF = cfg.CONF -CONF.register_opts(config.service_client_opts, group=CONFIG_GROUP) -CONF.register_opts(nova_client_opts, group=CONFIG_GROUP) -CONF.set_default('service_name', 'nova', CONFIG_GROUP) -CONF.set_default('service_type', 'compute', CONFIG_GROUP) - -NOVACLIENT_VERSION = '2' - - -def create(context, conf, **kwargs): - conf.register_opts(nova_client_opts, group=CONFIG_GROUP) - - client_config = conf[CONFIG_GROUP] - url = utils.get_url(SERVICE, context, client_config, - append_project_fmt='%(url)s/%(project)s', **kwargs) - LOG.debug('Creating nova client with url %s.', url) - - extensions = nc.discover_extensions(NOVACLIENT_VERSION) - session = kwargs.get('session') - if session is None: - LOG.error('Creating nova client failed with url %s.', url) - raise exception.InvalidParameterValue( - err="The parameter session is None.") - - return nc.Client(NOVACLIENT_VERSION, extensions=extensions, - session=kwargs.get('session'), endpoint_override=url, - logger=log.getLogger('novaclient')) diff --git a/karbor/services/protection/clients/s3.py b/karbor/services/protection/clients/s3.py deleted file mode 100644 index 6019ebab..00000000 --- a/karbor/services/protection/clients/s3.py +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
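(nova's create() above is the one client that refuses to fall back to raw token auth. A sketch of building the keystoneauth1 session it requires, with entirely hypothetical credentials; any keystoneauth1 auth plugin would do.)

    from keystoneauth1 import loading
    from keystoneauth1 import session as ks_session

    def build_session(auth_url, username, password, project_name):
        # 'password' is the simplest generic auth plugin shipped
        # with keystoneauth1.
        loader = loading.get_plugin_loader('password')
        auth = loader.load_from_options(
            auth_url=auth_url, username=username, password=password,
            project_name=project_name, user_domain_name='default',
            project_domain_name='default')
        return ks_session.Session(auth=auth)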
-import botocore -import botocore.session -import logging -from oslo_config import cfg - -LOG = logging.getLogger(__name__) -SERVICE = 's3' -s3_client_opts = [ - cfg.StrOpt(SERVICE + '_endpoint', - help='URL of the S3 compatible storage endpoint.'), - cfg.StrOpt(SERVICE + '_access_key', - help='Access key for S3 compatible storage.'), - cfg.StrOpt(SERVICE + '_secret_key', - secret=True, - help='Secret key for S3 compatible storage.'), - cfg.IntOpt(SERVICE + '_retry_attempts', - default=3, - help='The number of retries to make for ' - 'S3 operations'), - cfg.IntOpt(SERVICE + '_retry_backoff', - default=2, - help='The backoff time in seconds ' - 'between S3 retries') -] - - -def register_opts(conf): - conf.register_opts(s3_client_opts, group=SERVICE + '_client') - - -def create(context, conf, **kwargs): - register_opts(conf) - - client_config = conf.s3_client - LOG.info('Creating s3 client with url %s.', - client_config.s3_endpoint) - return botocore.session.get_session().create_client( - 's3', - aws_access_key_id=client_config.s3_access_key, - aws_secret_access_key=client_config.s3_secret_key, - endpoint_url=client_config.s3_endpoint - ) diff --git a/karbor/services/protection/clients/swift.py b/karbor/services/protection/clients/swift.py deleted file mode 100644 index 6d436ac7..00000000 --- a/karbor/services/protection/clients/swift.py +++ /dev/null @@ -1,94 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from keystoneauth1 import identity -from keystoneauth1 import session as keystone_session -from oslo_config import cfg -from swiftclient import client as swift - -SERVICE = 'swift' -swift_client_opts = [ - cfg.StrOpt(SERVICE + '_endpoint', - help='URL of the swift endpoint. Only used if ' - 'swift_auth_url is unset'), - cfg.StrOpt(SERVICE + '_catalog_info', - default='object-store:swift:publicURL', - help='Info to match when looking for swift in the service ' - 'catalog. Format is: separated values of the form: ' - '<service_type>:<service_name>:<endpoint_type> - ' - 'Only used if swift_endpoint and swift_auth_url ' - 'are unset'), - cfg.StrOpt('swift_auth_url', - help='The URL of the Keystone endpoint'), - cfg.StrOpt('swift_tenant_name', - help='Swift tenant/account name. ' - 'Required when connecting to an auth system'), - cfg.StrOpt('swift_project_domain_name', - default='default', - help='Swift project domain name. ' - 'Required when connecting to an auth system'), - cfg.StrOpt('swift_user_domain_name', - default='default', - help='Swift user domain name. '
- 'Required when connecting to an auth system'), - cfg.StrOpt('swift_user', - help='Swift user name, if swift_auth_url is set.'), - cfg.StrOpt('swift_key', - secret=True, - help='Swift key for authentication, if swift_auth_url ' - 'is set.'), - cfg.IntOpt('swift_retry_attempts', - default=3, - help='The number of retries to make for ' - 'Swift operations'), - cfg.IntOpt('swift_retry_backoff', - default=2, - help='The backoff time in seconds ' - 'between Swift retries'), - cfg.StrOpt('swift_ca_cert_file', - help='Location of the CA certificate file ' - 'to use for swift client requests.'), - cfg.BoolOpt('swift_auth_insecure', - default=False, - help='Bypass verification of server certificate when ' - 'making SSL connection to Swift.'), -] - - -def register_opts(conf): - conf.register_opts(swift_client_opts, group=SERVICE + '_client') - - -def create(context, conf, **kwargs): - register_opts(conf) - - client_config = conf.swift_client - session = kwargs.get('session', None) - - if not session: - auth = identity.Password( - auth_url=client_config.swift_auth_url, - username=client_config.swift_user, - password=client_config.swift_key, - project_name=client_config.swift_tenant_name, - project_domain_name=client_config.swift_project_domain_name, - user_domain_name=client_config.swift_user_domain_name, - ) - session = keystone_session.Session(auth=auth) - - return swift.Connection( - session=session, - insecure=client_config.swift_auth_insecure, - cacert=client_config.swift_ca_cert_file, - retries=client_config.swift_retry_attempts, - starting_backoff=client_config.swift_retry_backoff, - ) diff --git a/karbor/services/protection/clients/trove.py b/karbor/services/protection/clients/trove.py deleted file mode 100644 index 4757d60c..00000000 --- a/karbor/services/protection/clients/trove.py +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from troveclient import client as tc - -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import config -from karbor.services.protection.clients import utils - -LOG = logging.getLogger(__name__) - -SERVICE = "trove" -trove_client_opts = [ - cfg.StrOpt(SERVICE + '_endpoint', - help='URL of the trove endpoint.'), - cfg.StrOpt(SERVICE + '_catalog_info', - default='database:trove:publicURL', - help='Info to match when looking for trove in the service ' - 'catalog.
Format is: separated values of the form: ' - '<service_type>:<service_name>:<endpoint_type> - ' - 'Only used if trove_endpoint is unset'), - cfg.StrOpt(SERVICE + '_ca_cert_file', - default=None, - help='Location of the CA certificate file ' - 'to use for client requests in SSL connections.'), - cfg.BoolOpt(SERVICE + '_auth_insecure', - default=False, - help='Bypass verification of server certificate when ' - 'making SSL connection to trove.'), -] - -CONFIG_GROUP = '%s_client' % SERVICE -CONF = cfg.CONF -CONF.register_opts(config.service_client_opts, group=CONFIG_GROUP) -CONF.register_opts(trove_client_opts, group=CONFIG_GROUP) -CONF.set_default('service_name', 'trove', CONFIG_GROUP) -CONF.set_default('service_type', 'database', CONFIG_GROUP) - -TROVECLIENT_VERSION = '1.0' - - -def create(context, conf, **kwargs): - conf.register_opts(trove_client_opts, group=CONFIG_GROUP) - - client_config = conf[CONFIG_GROUP] - url = utils.get_url(SERVICE, context, client_config, - append_project_fmt='%(url)s/%(project)s', **kwargs) - endpoint = url % {"tenant_id": context.project_id} - LOG.debug('Creating trove client with url %s.', endpoint) - - if kwargs.get('session'): - return tc.Client(TROVECLIENT_VERSION, session=kwargs.get('session'), - endpoint_override=endpoint) - - args = { - 'input_auth_token': context.auth_token, - 'project_id': context.project_id, - 'service_catalog_url': endpoint, - 'cacert': client_config.trove_ca_cert_file, - 'insecure': client_config.trove_auth_insecure, - } - client = tc.Client(TROVECLIENT_VERSION, **args) - client.client.auth_token = context.auth_token - client.client.management_url = endpoint - return client diff --git a/karbor/services/protection/clients/utils.py b/karbor/services/protection/clients/utils.py deleted file mode 100644 index 62a41b98..00000000 --- a/karbor/services/protection/clients/utils.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
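(Sketch of the endpoint templating used by trove's create() above: get_url(), defined in clients/utils.py below, rewrites '$' to '%' in catalog URLs, so the template expands with plain string interpolation. The URL here is a made-up example.)

    url = 'http://trove.example.test/v1.0/%(tenant_id)s'
    endpoint = url % {'tenant_id': 'demo-project-id'}
    assert endpoint == 'http://trove.example.test/v1.0/demo-project-id'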
- -from karbor import exception -from karbor.i18n import _ - - -def _parse_service_catalog_info(config, context): - try: - service_type, service_name, endpoint_type = config.split(':') - except ValueError: - msg = _("Failed to parse the catalog info option %s, " - "must be in the form: " - "<service_type>:<service_name>:<endpoint_type>" - ) % config - raise exception.KarborException(msg) - - for entry in context.service_catalog: - if entry.get('type') == service_type: - return entry.get('endpoints')[0].get(endpoint_type) - - -def _parse_service_endpoint(endpoint_url, context, append_project_fmt=None): - if not endpoint_url: - return None - - if not append_project_fmt: - return endpoint_url - - return append_project_fmt % { - 'url': endpoint_url, - 'project': context.project_id, - } - - -def get_url(service, context, client_config, - append_project_fmt=None, **kwargs): - '''Return the URL of the given service endpoint.''' - - url = "" - privileged_user = kwargs.get('privileged_user') - # get url by endpoint - if privileged_user is not True: - try: - url = _parse_service_endpoint( - getattr(client_config, '%s_endpoint' % service), - context, append_project_fmt) - if url: - return url - except Exception: - pass - - # get url by catalog - try: - url = _parse_service_catalog_info( - getattr(client_config, '%s_catalog_info' % service), context) - if url: - return url - except Exception: - pass - - # get url by accessing keystone - try: - keystone_plugin = kwargs.get('keystone_plugin') - url = keystone_plugin.get_service_endpoint( - client_config.service_name, client_config.service_type, - client_config.region_id, client_config.interface) - - url = url.replace("$", "%") - except Exception: - pass - - if url: - return url - - raise exception.KarborException( - _("Couldn't find the endpoint of service(%s)") % service) diff --git a/karbor/services/protection/flows/__init__.py b/karbor/services/protection/flows/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/flows/copy.py b/karbor/services/protection/flows/copy.py deleted file mode 100644 index 66508b0e..00000000 --- a/karbor/services/protection/flows/copy.py +++ /dev/null @@ -1,191 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
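(A runnable toy version of the catalog lookup performed by _parse_service_catalog_info above, applied to a fabricated service catalog; in karbor the catalog comes from the keystone token.)

    def parse_catalog_info(catalog_info, service_catalog):
        # Same split-and-scan logic as the helper above.
        service_type, _service_name, endpoint_type = catalog_info.split(':')
        for entry in service_catalog:
            if entry.get('type') == service_type:
                return entry.get('endpoints')[0].get(endpoint_type)

    catalog = [{'type': 'volumev3',
                'endpoints': [{'publicURL': 'http://cinder.example.test/v3'}]}]
    print(parse_catalog_info('volumev3:cinderv3:publicURL', catalog))
    # -> http://cinder.example.test/v3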
- -from karbor.common import constants -from karbor import exception -from karbor.resource import Resource -from karbor.services.protection.flows import utils -from karbor.services.protection import resource_flow -from oslo_log import log as logging -from oslo_utils import timeutils - -from oslo_serialization import jsonutils - -from taskflow import task - -LOG = logging.getLogger(__name__) - - -class InitiateCopyTask(task.Task): - def execute(self, context, checkpoint, checkpoint_copy, operation_log, - *args, **kwargs): - LOG.debug("Initiate copy checkpoint_id: %s", checkpoint_copy.id) - checkpoint_copy.status = constants.CHECKPOINT_STATUS_COPYING - checkpoint_copy.commit() - update_fields = {"status": checkpoint_copy.status} - utils.update_operation_log(context, operation_log, update_fields) - - def revert(self, context, checkpoint, checkpoint_copy, operation_log, - *args, **kwargs): - LOG.debug("Failed to copy checkpoint_id: %s", checkpoint_copy.id) - checkpoint_copy.status = constants.CHECKPOINT_STATUS_ERROR - checkpoint_copy.commit() - update_fields = { - "status": checkpoint_copy.status, - "ended_at": timeutils.utcnow() - } - utils.update_operation_log(context, operation_log, update_fields) - - -class CompleteCopyTask(task.Task): - def execute(self, context, checkpoint, checkpoint_copy, operation_log): - LOG.debug("Complete copy checkpoint_id: %s", checkpoint_copy.id) - checkpoint_copy.status = constants.CHECKPOINT_STATUS_AVAILABLE - if checkpoint_copy.extra_info: - extra_info = jsonutils.loads(checkpoint_copy.extra_info) - extra_info['copy_status'] = \ - constants.CHECKPOINT_STATUS_COPY_FINISHED - else: - extra_info = { - 'copy_status': constants.CHECKPOINT_STATUS_COPY_FINISHED} - checkpoint_copy.extra_info = jsonutils.dumps(extra_info) - checkpoint_copy.commit() - update_fields = { - "status": checkpoint_copy.status, - "ended_at": timeutils.utcnow() - } - utils.update_operation_log(context, operation_log, update_fields) - - -def get_flow(context, protectable_registry, workflow_engine, plan, provider, - checkpoint, checkpoint_copy): - resources = set(Resource(**item) for item in plan.get("resources")) - resource_graph = protectable_registry.build_graph(context, - resources) - checkpoint_copy.resource_graph = resource_graph - checkpoint_copy.commit() - operation_log = utils.create_operation_log(context, checkpoint_copy, - constants.OPERATION_COPY) - flow_name = "Copy_" + plan.get('id') + checkpoint.id - copy_flow = workflow_engine.build_flow(flow_name, 'linear') - plugins = provider.load_plugins() - parameters = {} - parameters.update(plan.get('parameters', {})) - parameters['checkpoint'] = checkpoint - parameters['checkpoint_copy'] = checkpoint_copy - parameters['operation_log'] = operation_log - resources_task_flow = resource_flow.build_resource_flow( - operation_type=constants.OPERATION_COPY, - context=context, - workflow_engine=workflow_engine, - resource_graph=resource_graph, - plugins=plugins, - parameters=parameters, - ) - store_dict = {'context': context, - 'checkpoint': checkpoint, - 'checkpoint_copy': checkpoint_copy, - 'operation_log': operation_log - } - workflow_engine.add_tasks( - copy_flow, - InitiateCopyTask(name='InitiateCopyTask_' + checkpoint_copy.id, - inject=store_dict), - resources_task_flow, - CompleteCopyTask(name='CompleteCopyTask_' + checkpoint_copy.id, - inject=store_dict), - ) - return copy_flow - - -def get_flows(context, protectable_registry, workflow_engine, plan, provider, - checkpoints, checkpoint_collection): - checkpoints_protect_copy = 
prepare_create_flows( - context, plan, checkpoints, checkpoint_collection) - - copy_flows = create_flows( - context, protectable_registry, workflow_engine, plan, provider, - checkpoints_protect_copy, checkpoint_collection) - - return copy_flows, checkpoints_protect_copy - - -def prepare_create_flows(context, plan, checkpoints, checkpoint_collection): - LOG.debug("Creating checkpoint copy for plan. plan: %s", plan.id) - checkpoints_protect_copy = [] - for checkpoint in checkpoints: - extra_info = checkpoint.get("extra_info", None) - copy_status = None - if extra_info: - extra_info = jsonutils.loads(extra_info) - copy_status = extra_info.get('copy_status', None) - if (checkpoint.get("status") != - constants.CHECKPOINT_STATUS_AVAILABLE) or ( - copy_status == - constants.CHECKPOINT_STATUS_COPY_FINISHED): - continue - checkpoint_dict = { - 'project_id': context.project_id, - 'status': constants.CHECKPOINT_STATUS_WAIT_COPYING, - 'provider_id': checkpoint.get("provider_id"), - "protection_plan": checkpoint.get("protection_plan"), - "extra_info": {} - } - checkpoint_copy = checkpoint_collection.create(plan, - checkpoint_dict) - checkpoint_protect_copy = { - 'checkpoint_protect_id': checkpoint.get("id"), - 'checkpoint_copy_id': checkpoint_copy.id - } - checkpoints_protect_copy.append(checkpoint_protect_copy) - LOG.debug("The protect and copy checkpoints . checkpoints_copy: %s", - checkpoints_protect_copy) - return checkpoints_protect_copy - - -def create_flows(context, protectable_registry, workflow_engine, - plan, provider, checkpoints_protect_copy, - checkpoint_collection): - LOG.debug("Creating flows for the plan. checkpoints: %s", - checkpoints_protect_copy) - flow_name = "Copy_flows" + plan.get('id') - copy_flows = workflow_engine.build_flow(flow_name, 'linear') - for checkpoint_protect_copy in checkpoints_protect_copy: - checkpoint_protect_id = checkpoint_protect_copy.get( - "checkpoint_protect_id") - checkpoint_copy_id = checkpoint_protect_copy.get( - "checkpoint_copy_id") - checkpoint_protect = checkpoint_collection.get(checkpoint_protect_id) - checkpoint_copy = checkpoint_collection.get(checkpoint_copy_id) - try: - copy_flow = get_flow( - context, - protectable_registry, - workflow_engine, - plan, - provider, - checkpoint_protect, - checkpoint_copy, - ) - except Exception as e: - LOG.exception("Failed to create copy flow, checkpoint: %s", - checkpoint_protect_id) - raise exception.FlowError( - flow="copy", - error=e.msg if hasattr(e, 'msg') else 'Internal error') - workflow_engine.add_tasks(copy_flows, copy_flow) - flows_engine = workflow_engine.get_engine(copy_flows, store={ - 'context': context - }) - LOG.debug("Creating flows for the plan. copy_flows: %s", copy_flows) - - return flows_engine diff --git a/karbor/services/protection/flows/delete.py b/karbor/services/protection/flows/delete.py deleted file mode 100644 index ff63407e..00000000 --- a/karbor/services/protection/flows/delete.py +++ /dev/null @@ -1,82 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
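(The copy flow above, like the delete, protect, and restore flows that follow, wraps the per-resource tasks between an Initiate and a Complete task in a linear taskflow flow. A self-contained sketch of that pattern, with hypothetical task bodies; build_flow()/add_tasks() in the real code wrap this same composition.)

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    class Initiate(task.Task):
        def execute(self):
            print('checkpoint -> in progress')

    class Complete(task.Task):
        def execute(self):
            print('checkpoint -> finished')

    flow = linear_flow.Flow('demo_flow')
    flow.add(Initiate(), Complete())
    engines.run(flow)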
- -from karbor.common import constants -from karbor.services.protection.flows import utils -from karbor.services.protection import resource_flow -from oslo_log import log as logging -from oslo_utils import timeutils - -from taskflow import task - -LOG = logging.getLogger(__name__) - - -class InitiateDeleteTask(task.Task): - def execute(self, context, checkpoint, operation_log, *args, **kwargs): - LOG.debug("Initiate delete checkpoint_id: %s", checkpoint.id) - checkpoint.status = constants.CHECKPOINT_STATUS_DELETING - checkpoint.commit() - update_fields = {"status": checkpoint.status} - utils.update_operation_log(context, operation_log, update_fields) - - def revert(self, context, checkpoint, operation_log, *args, **kwargs): - LOG.debug("Failed to delete checkpoint_id: %s", checkpoint.id) - checkpoint.status = constants.CHECKPOINT_STATUS_ERROR_DELETING - checkpoint.commit() - update_fields = { - "status": checkpoint.status, - "ended_at": timeutils.utcnow() - } - utils.update_operation_log(context, operation_log, update_fields) - - -class CompleteDeleteTask(task.Task): - def execute(self, context, checkpoint, operation_log): - LOG.debug("Complete delete checkpoint_id: %s", checkpoint.id) - checkpoint.delete() - update_fields = { - "status": checkpoint.status, - "ended_at": timeutils.utcnow() - } - utils.update_operation_log(context, operation_log, update_fields) - - -def get_flow(context, workflow_engine, checkpoint, provider): - LOG.info("Start get checkpoint flow, checkpoint_id: %s", checkpoint.id) - flow_name = "Delete_Checkpoint_" + checkpoint.id - delete_flow = workflow_engine.build_flow(flow_name, 'linear') - resource_graph = checkpoint.resource_graph - operation_log = utils.create_operation_log(context, checkpoint) - plugins = provider.load_plugins() - resources_task_flow = resource_flow.build_resource_flow( - operation_type=constants.OPERATION_DELETE, - context=context, - workflow_engine=workflow_engine, - resource_graph=resource_graph, - plugins=plugins, - parameters=None - ) - workflow_engine.add_tasks( - delete_flow, - InitiateDeleteTask(), - resources_task_flow, - CompleteDeleteTask(), - ) - flow_engine = workflow_engine.get_engine( - delete_flow, - store={ - 'context': context, - 'checkpoint': checkpoint, - 'operation_log': operation_log} - ) - return flow_engine diff --git a/karbor/services/protection/flows/protect.py b/karbor/services/protection/flows/protect.py deleted file mode 100644 index 965029dc..00000000 --- a/karbor/services/protection/flows/protect.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
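(Sketch of the revert-on-failure behavior the delete flow above relies on: taskflow calls revert() on every task that already ran once a later task fails, which is what moves a checkpoint into its error state. Task names here are hypothetical.)

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    class Mark(task.Task):
        def execute(self):
            print('status -> deleting')

        def revert(self, **kwargs):
            print('status -> error_deleting')

    class FailingDelete(task.Task):
        def execute(self):
            raise RuntimeError('simulated resource failure')

    try:
        engines.run(linear_flow.Flow('demo').add(Mark(), FailingDelete()))
    except RuntimeError:
        pass  # Mark.revert() has already executed by this point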
- -from karbor.common import constants -from karbor.resource import Resource -from karbor.services.protection.flows import utils -from karbor.services.protection import resource_flow -from oslo_log import log as logging -from oslo_utils import timeutils - -from taskflow import task - -LOG = logging.getLogger(__name__) - - -class InitiateProtectTask(task.Task): - def execute(self, context, checkpoint, operation_log, *args, **kwargs): - LOG.debug("Initiate protect checkpoint_id: %s", checkpoint.id) - checkpoint.status = constants.CHECKPOINT_STATUS_PROTECTING - checkpoint.commit() - update_fields = {"status": checkpoint.status} - utils.update_operation_log(context, operation_log, update_fields) - - def revert(self, context, checkpoint, operation_log, *args, **kwargs): - LOG.debug("Failed to protect checkpoint_id: %s", checkpoint.id) - checkpoint.status = constants.CHECKPOINT_STATUS_ERROR - checkpoint.commit() - update_fields = { - "status": checkpoint.status, - "ended_at": timeutils.utcnow() - } - utils.update_operation_log(context, operation_log, update_fields) - - -class CompleteProtectTask(task.Task): - def execute(self, context, checkpoint, operation_log): - LOG.debug("Complete protect checkpoint_id: %s", checkpoint.id) - checkpoint.status = constants.CHECKPOINT_STATUS_AVAILABLE - checkpoint.commit() - update_fields = { - "status": checkpoint.status, - "ended_at": timeutils.utcnow() - } - utils.update_operation_log(context, operation_log, update_fields) - - -def get_flow(context, protectable_registry, workflow_engine, plan, provider, - checkpoint): - resources = set(Resource(**item) for item in plan.get("resources")) - resource_graph = protectable_registry.build_graph(context, - resources) - checkpoint.resource_graph = resource_graph - checkpoint.commit() - operation_log = utils.create_operation_log(context, checkpoint) - flow_name = "Protect_" + plan.get('id') - protection_flow = workflow_engine.build_flow(flow_name, 'linear') - plugins = provider.load_plugins() - parameters = plan.get('parameters') - resources_task_flow = resource_flow.build_resource_flow( - operation_type=constants.OPERATION_PROTECT, - context=context, - workflow_engine=workflow_engine, - resource_graph=resource_graph, - plugins=plugins, - parameters=parameters, - ) - workflow_engine.add_tasks( - protection_flow, - InitiateProtectTask(), - resources_task_flow, - CompleteProtectTask(), - ) - flow_engine = workflow_engine.get_engine(protection_flow, store={ - 'context': context, - 'checkpoint': checkpoint, - 'operation_log': operation_log - }) - return flow_engine diff --git a/karbor/services/protection/flows/restore.py b/karbor/services/protection/flows/restore.py deleted file mode 100644 index c4c40eb3..00000000 --- a/karbor/services/protection/flows/restore.py +++ /dev/null @@ -1,98 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
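# Illustrative shape (all values invented) of the plan dict consumed by
# the protect get_flow() above: each entry under 'resources' is expanded
# with Resource(**item), so it must carry the Resource fields, and
# 'parameters' is keyed by resource type (or, per the validation code in
# the manager, "type#id" for a single instance).
plan = {
    "id": "plan-uuid",
    "provider_id": "provider-uuid",
    "resources": [
        {"type": "OS::Cinder::Volume", "id": "volume-uuid",
         "name": "data-volume", "extra_info": None},
    ],
    "parameters": {"OS::Cinder::Volume": {"backup_mode": "full"}},
}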
- -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -from taskflow import task - -from karbor.common import constants -from karbor.services.protection.flows import utils -from karbor.services.protection import resource_flow - -sync_status_opts = [ - cfg.IntOpt('sync_status_interval', - default=20, - help='update protection status interval') -] - -CONF = cfg.CONF -CONF.register_opts(sync_status_opts) - -LOG = logging.getLogger(__name__) - - -class InitiateRestoreTask(task.Task): - def execute(self, context, restore, operation_log, *args, **kwargs): - LOG.debug("Initiate restore restore_id: %s", restore.id) - restore['status'] = constants.RESTORE_STATUS_IN_PROGRESS - restore.save() - update_fields = {"status": restore.status} - utils.update_operation_log(context, operation_log, update_fields) - - def revert(self, context, restore, operation_log, *args, **kwargs): - LOG.debug("Failed to restore restore_id: %s", restore.id) - restore['status'] = constants.RESTORE_STATUS_FAILURE - restore.save() - update_fields = { - "status": restore.status, - "ended_at": timeutils.utcnow() - } - utils.update_operation_log(context, operation_log, update_fields) - - -class CompleteRestoreTask(task.Task): - def execute(self, context, restore, operation_log, *args, **kwargs): - LOG.debug("Complete restore restore_id: %s", restore.id) - restore['status'] = constants.RESTORE_STATUS_SUCCESS - restore.save() - update_fields = { - "status": restore.status, - "ended_at": timeutils.utcnow() - } - utils.update_operation_log(context, operation_log, update_fields) - - -def get_flow(context, workflow_engine, checkpoint, provider, restore, - restore_auth): - resource_graph = checkpoint.resource_graph - operation_log = utils.create_operation_log_restore(context, restore) - parameters = restore.parameters - flow_name = "Restore_" + checkpoint.id - restore_flow = workflow_engine.build_flow(flow_name, 'linear') - plugins = provider.load_plugins() - resources_task_flow = resource_flow.build_resource_flow( - operation_type=constants.OPERATION_RESTORE, - context=context, - workflow_engine=workflow_engine, - resource_graph=resource_graph, - plugins=plugins, - parameters=parameters - ) - - workflow_engine.add_tasks( - restore_flow, - InitiateRestoreTask(), - resources_task_flow, - CompleteRestoreTask() - ) - flow_engine = workflow_engine.get_engine( - restore_flow, - store={ - 'context': context, - 'checkpoint': checkpoint, - 'restore': restore, - 'new_resources': {}, - 'operation_log': operation_log - } - ) - return flow_engine diff --git a/karbor/services/protection/flows/utils.py b/karbor/services/protection/flows/utils.py deleted file mode 100644 index b6dc0ced..00000000 --- a/karbor/services/protection/flows/utils.py +++ /dev/null @@ -1,120 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
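# Sketch: once sync_status_opts is registered above, the option behaves
# like any other oslo.config option, readable from the global CONF
# object and overridable in the service configuration file:
#
#     [DEFAULT]
#     sync_status_interval = 30
#
from oslo_config import cfg

interval = cfg.CONF.sync_status_interval  # 20 unless overridden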
- -from karbor.common import constants -from karbor import exception -from karbor.i18n import _ -from karbor import objects -from karbor.objects import base as objects_base -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import timeutils - -LOG = logging.getLogger(__name__) - - -def create_operation_log(context, checkpoint, operation_type=None): - checkpoint_dict = checkpoint.to_dict() - extra_info = checkpoint_dict.get('extra_info', None) - scheduled_operation_id = None - if extra_info: - extra_info_dict = jsonutils.loads(extra_info) - created_by = extra_info_dict.get('created_by', None) - if created_by == constants.OPERATION_ENGINE: - scheduled_operation_id = extra_info_dict.get( - 'scheduled_operation_id', None) - - protection_plan = checkpoint_dict['protection_plan'] - plan_id = None - provider_id = None - if protection_plan: - plan_id = protection_plan.get("id") - provider_id = protection_plan.get("provider_id") - operation_log_properties = { - 'project_id': checkpoint_dict['project_id'], - 'operation_type': ( - constants.OPERATION_PROTECT if operation_type is None - else operation_type), - 'checkpoint_id': checkpoint_dict['id'], - 'plan_id': plan_id, - 'provider_id': provider_id, - 'scheduled_operation_id': scheduled_operation_id, - 'status': checkpoint_dict['status'], - 'started_at': timeutils.utcnow() - } - try: - operation_log = objects.OperationLog(context=context, - **operation_log_properties) - operation_log.create() - return operation_log - except Exception: - LOG.error('Error creating operation log. checkpoint: %s', - checkpoint.id) - raise - - -def update_operation_log(context, operation_log, fields): - if not isinstance(operation_log, objects_base.KarborObject): - msg = _("The parameter must be a object of " - "KarborObject class.") - raise exception.InvalidInput(reason=msg) - - try: - operation_log.update(fields) - operation_log.save() - except Exception: - LOG.error('Error update operation log. operation_log: %s', - operation_log.id) - raise - - -def create_operation_log_restore(context, restore): - operation_log_properties = { - 'project_id': restore.get('project_id'), - 'operation_type': constants.OPERATION_RESTORE, - 'checkpoint_id': restore.get('checkpoint_id'), - 'plan_id': restore.get('plan_id', None), - 'provider_id': restore.get('provider_id'), - 'restore_id': restore.get('id'), - 'status': restore.get('status'), - 'started_at': timeutils.utcnow() - } - try: - operation_log = objects.OperationLog(context=context, - **operation_log_properties) - operation_log.create() - return operation_log - except Exception: - LOG.error('Error creating operation log. checkpoint: %s', - restore.id) - raise - - -def create_operation_log_verify(context, verify): - operation_log_properties = { - 'project_id': verify.get('project_id'), - 'operation_type': constants.OPERATION_VERIFY, - 'checkpoint_id': verify.get('checkpoint_id'), - 'plan_id': verify.get('plan_id', None), - 'provider_id': verify.get('provider_id'), - 'status': verify.get('status'), - 'started_at': timeutils.utcnow() - } - try: - operation_log = objects.OperationLog(context=context, - **operation_log_properties) - operation_log.create() - return operation_log - except Exception: - LOG.error('Error creating operation log. 
verify: %s', - verify.id) - raise diff --git a/karbor/services/protection/flows/verify.py b/karbor/services/protection/flows/verify.py deleted file mode 100644 index ac91bf07..00000000 --- a/karbor/services/protection/flows/verify.py +++ /dev/null @@ -1,92 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -from taskflow import task - -from karbor.common import constants -from karbor.services.protection.flows import utils -from karbor.services.protection import resource_flow - - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -class InitiateVerifyTask(task.Task): - def execute(self, context, verify, operation_log, *args, **kwargs): - LOG.debug("Initiate verify verify_id: %s", verify.id) - verify['status'] = constants.VERIFICATION_STATUS_IN_PROGRESS - verify.save() - update_fields = {"status": verify.status} - utils.update_operation_log(context, operation_log, update_fields) - - def revert(self, context, verify, operation_log, *args, **kwargs): - LOG.debug("Failed to verify verify_id: %s", verify.id) - verify['status'] = constants.VERIFICATION_STATUS_FAILURE - verify.save() - update_fields = { - "status": verify.status, - "ended_at": timeutils.utcnow() - } - utils.update_operation_log(context, operation_log, update_fields) - - -class CompleteVerifyTask(task.Task): - def execute(self, context, verify, operation_log, *args, **kwargs): - LOG.debug("Complete verify verify_id: %s", verify.id) - verify['status'] = constants.VERIFICATION_STATUS_SUCCESS - verify.save() - update_fields = { - "status": verify.status, - "ended_at": timeutils.utcnow() - } - utils.update_operation_log(context, operation_log, update_fields) - - -def get_flow(context, workflow_engine, checkpoint, provider, verify): - resource_graph = checkpoint.resource_graph - operation_log = utils.create_operation_log_verify(context, verify) - parameters = verify.parameters - flow_name = "Verify_" + checkpoint.id - verify_flow = workflow_engine.build_flow(flow_name, 'linear') - plugins = provider.load_plugins() - resources_task_flow = resource_flow.build_resource_flow( - operation_type=constants.OPERATION_VERIFY, - context=context, - workflow_engine=workflow_engine, - resource_graph=resource_graph, - plugins=plugins, - parameters=parameters - ) - - workflow_engine.add_tasks( - verify_flow, - InitiateVerifyTask(), - resources_task_flow, - CompleteVerifyTask() - ) - - flow_engine = workflow_engine.get_engine( - verify_flow, - store={ - 'context': context, - 'checkpoint': checkpoint, - 'verify': verify, - 'new_resources': {}, - 'operation_log': operation_log - } - ) - return flow_engine diff --git a/karbor/services/protection/flows/worker.py b/karbor/services/protection/flows/worker.py deleted file mode 100644 index 58822947..00000000 --- a/karbor/services/protection/flows/worker.py +++ /dev/null @@ -1,119 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance 
with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils - -from karbor.common import constants -from karbor import exception -from karbor.services.protection.flows import copy as flow_copy -from karbor.services.protection.flows import delete as flow_delete -from karbor.services.protection.flows import protect as flow_protect -from karbor.services.protection.flows import restore as flow_restore -from karbor.services.protection.flows import verify as flow_verify - -workflow_opts = [ - cfg.StrOpt( - 'workflow_engine', - default="karbor.services.protection.flows.workflow.TaskFlowEngine", - help='The workflow engine provides *flow* and *task* interface') -] - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -CONF.register_opts(workflow_opts) - - -class Worker(object): - def __init__(self, engine_path=None): - super(Worker, self).__init__() - try: - self.workflow_engine = self._load_engine(engine_path) - except Exception: - LOG.error("load work flow engine failed") - raise - - def _load_engine(self, engine_path): - if not engine_path: - engine_path = CONF.workflow_engine - engine = importutils.import_object(engine_path) - return engine - - def get_flow(self, context, operation_type, checkpoint, provider, - **kwargs): - if operation_type == constants.OPERATION_PROTECT: - plan = kwargs.get('plan', None) - protectable_registry = kwargs.get('protectable_registry', None) - flow = flow_protect.get_flow( - context, - protectable_registry, - self.workflow_engine, - plan, - provider, - checkpoint, - ) - elif operation_type == constants.OPERATION_RESTORE: - restore = kwargs.get('restore') - restore_auth = kwargs.get('restore_auth') - flow = flow_restore.get_flow( - context, - self.workflow_engine, - checkpoint, - provider, - restore, - restore_auth, - ) - elif operation_type == constants.OPERATION_VERIFY: - verify = kwargs.get('verify') - flow = flow_verify.get_flow( - context, - self.workflow_engine, - checkpoint, - provider, - verify - ) - elif operation_type == constants.OPERATION_DELETE: - flow = flow_delete.get_flow( - context, - self.workflow_engine, - checkpoint, - provider, - ) - elif operation_type == constants.OPERATION_COPY: - plan = kwargs.get('plan', None) - protectable_registry = kwargs.get('protectable_registry', None) - checkpoint_collection = kwargs.get('checkpoint_collection', None) - flow, checkpoint_copy = flow_copy.get_flows( - context, - protectable_registry, - self.workflow_engine, - plan, - provider, - checkpoint, - checkpoint_collection, - ) - return flow, checkpoint_copy - else: - raise exception.InvalidParameterValue( - err='unknown operation type %s' % operation_type - ) - - return flow - - def run_flow(self, flow_engine): - self.workflow_engine.run_engine(flow_engine) - - def flow_outputs(self, flow_engine, target=None): - return self.workflow_engine.output(flow_engine, target=target) diff --git a/karbor/services/protection/flows/workflow.py b/karbor/services/protection/flows/workflow.py deleted file mode 100644 index bffe7199..00000000 --- a/karbor/services/protection/flows/workflow.py +++ 
/dev/null @@ -1,184 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import futurist -import six - -from karbor import exception -from karbor.i18n import _ -from oslo_log import log as logging - -from taskflow import engines -from taskflow.patterns import graph_flow -from taskflow.patterns import linear_flow -from taskflow import task - - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class WorkFlowEngine(object): - @abc.abstractmethod - def build_flow(self, flow_name, flow_type='graph'): - """build flow - - :param flow_name: the flow name - :param flow_type: 'linear' or 'graph', default:'graph' - :return: linear flow or graph flow - """ - return - - @abc.abstractmethod - def get_engine(self, flow, **kwargs): - return - - @abc.abstractmethod - def run_engine(self, flow_engine): - return - - @abc.abstractmethod - def output(self, flow_engine, target=None): - return - - @abc.abstractmethod - def create_task(self, function, requires=None, provides=None, - inject=None, **kwargs): - """create a task - - :param function: make a task from this callable - :param requires: A OrderedSet of inputs this task requires to function. - :param provides: A set, string or list of items that this will be - providing (or could provide) to others - :param inject: An immutable input_name => value dictionary which - specifies any initial inputs that should be - automatically injected into the task scope before the - task execution commences - """ - return - - @abc.abstractmethod - def link_task(self, flow, u, v): - """Link existing node as a runtime dependency of existing node v - - :param u: task or flow to create a link from (must exist already) - :param v: task or flow to create a link to (must exist already) - :param flow: graph flow - """ - return - - @abc.abstractmethod - def add_tasks(self, flow, *nodes, **kwargs): - return - - @abc.abstractmethod - def search_task(self, flow, task_id): - return - - -class TaskFlowEngine(WorkFlowEngine): - def build_flow(self, flow_name, flow_type='graph'): - if flow_type == 'linear': - return linear_flow.Flow(flow_name) - elif flow_type == 'graph': - return graph_flow.Flow(flow_name) - else: - raise ValueError(_("unsupported flow type: %s") % flow_type) - - def get_engine(self, flow, **kwargs): - if flow is None: - LOG.error("The flow is None, build it first") - raise exception.InvalidTaskFlowObject( - reason=_("The flow is None")) - executor = kwargs.get('executor', None) - engine = kwargs.get('engine', None) - store = kwargs.get('store', None) - if not executor: - executor = futurist.GreenThreadPoolExecutor() - if not engine: - engine = 'parallel' - flow_engine = engines.load(flow, - executor=executor, - engine=engine, - store=store) - return flow_engine - - def karbor_flow_watch(self, state, details): - LOG.trace("The Flow [%s] OldState[%s] changed to State[%s]: ", - details.get('task_name'), details.get('old_state'), state) - - def karbor_atom_watch(self, state, details): - LOG.trace("The Task [%s] OldState[%s] changed to 
State[%s]: ", - details.get('task_name'), details.get('old_state'), state) - - def run_engine(self, flow_engine): - if flow_engine is None: - LOG.error("Flow engine is None,get it first") - raise exception.InvalidTaskFlowObject( - reason=_("The flow_engine is None")) - - flow_engine.notifier.register('*', self.karbor_flow_watch) - flow_engine.atom_notifier.register('*', self.karbor_atom_watch) - flow_engine.run() - - def output(self, flow_engine, target=None): - if flow_engine is None: - LOG.error("Flow engine is None,return nothing") - raise exception.InvalidTaskFlowObject( - reason=_("The flow_engine is None")) - if target: - return flow_engine.storage.fetch(target) - return flow_engine.storage.fetch_all() - - def create_task(self, function, requires=None, provides=None, - inject=None, **kwargs): - name = kwargs.get('name', None) - auto_extract = kwargs.get('auto_extract', True) - rebind = kwargs.get('rebind', None) - revert = kwargs.get('revert', None) - version = kwargs.get('version', None) - if function: - return task.FunctorTask(function, - name=name, - provides=provides, - requires=requires, - auto_extract=auto_extract, - rebind=rebind, - revert=revert, - version=version, - inject=inject) - - def link_task(self, flow, u, v): - if flow is None: - LOG.error("The flow is None, build it first") - raise exception.InvalidTaskFlowObject( - reason=_("The flow is None")) - if u and v: - flow.link(u, v) - - def add_tasks(self, flow, *nodes, **kwargs): - if flow is None: - LOG.error("The flow is None, get it first") - raise exception.InvalidTaskFlowObject( - reason=_("The flow is None")) - flow.add(*nodes, **kwargs) - - def search_task(self, flow, task_id): - if not isinstance(flow, graph_flow.Flow): - LOG.error("this is not a graph flow,flow name:%s", flow.name) - return - for node, meta in flow.iter_nodes(): - if not isinstance(node, task.FunctorTask): - continue - if task_id == getattr(node, 'name'): - return node diff --git a/karbor/services/protection/graph.py b/karbor/services/protection/graph.py deleted file mode 100644 index 161dfb88..00000000 --- a/karbor/services/protection/graph.py +++ /dev/null @@ -1,237 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import abc -from collections import namedtuple - -from oslo_log import log as logging -from oslo_serialization import jsonutils - -import six - -from karbor import exception -from karbor.i18n import _ -from karbor.resource import Resource - - -_GraphBuilderContext = namedtuple("_GraphBuilderContext", ( - "source_set", - "encountered_set", - "finished_nodes", - "get_child_nodes", -)) - -GraphNode = namedtuple("GraphNode", ( - "value", - "child_nodes", -)) - -PackedGraph = namedtuple('PackedGraph', ['nodes', 'adjacency']) - -LOG = logging.getLogger(__name__) - - -class FoundLoopError(RuntimeError): - def __init__(self): - super(FoundLoopError, self).__init__( - _("A loop was found in the graph")) - - -def _build_graph_rec(context, node): - LOG.trace("Entered node: %s", node) - source_set = context.source_set - encountered_set = context.encountered_set - finished_nodes = context.finished_nodes - LOG.trace("Gray set is %s", encountered_set) - if node in encountered_set: - raise FoundLoopError() - - LOG.trace("Black set is %s", finished_nodes.keys()) - if node in finished_nodes: - return finished_nodes[node] - - LOG.trace("Change to gray: %s", node) - encountered_set.add(node) - child_nodes = context.get_child_nodes(node) - LOG.trace("Child nodes are %s", child_nodes) - # If we found a parent than this is not a source - source_set.difference_update(child_nodes) - child_list = [] - for child_node in child_nodes: - child_list.append(_build_graph_rec(context, child_node)) - - LOG.trace("Change to black: %s", node) - encountered_set.discard(node) - graph_node = GraphNode(value=node, child_nodes=tuple(child_list)) - finished_nodes[node] = graph_node - - return graph_node - - -def build_graph(start_nodes, get_child_nodes_func): - context = _GraphBuilderContext( - source_set=set(start_nodes), - encountered_set=set(), - finished_nodes={}, - get_child_nodes=get_child_nodes_func, - ) - - result = [] - for node in start_nodes: - result.append(_build_graph_rec(context, node)) - - assert(len(context.encountered_set) == 0) - - return [item for item in result if item.value in context.source_set] - - -@six.add_metaclass(abc.ABCMeta) -class GraphWalkerListener(object): - """Interface for listening to GraphWaler events - - Classes that want to be able to use the graph walker to iterate over - a graph should implement this interface. - """ - @abc.abstractmethod - def on_node_enter(self, node, already_visited): - pass - - @abc.abstractmethod - def on_node_exit(self, node): - pass - - -class GraphWalker(object): - def __init__(self): - super(GraphWalker, self).__init__() - self._listeners = [] - - def register_listener(self, graph_walker_listener): - self._listeners.append(graph_walker_listener) - - def unregister_listener(self, graph_walker_listener): - self._listeners.remove(graph_walker_listener) - - def walk_graph(self, source_nodes): - self._walk_graph(source_nodes, set()) - - def _walk_graph(self, source_nodes, visited_nodes): - for node in source_nodes: - for listener in self._listeners: - listener.on_node_enter(node, node in visited_nodes) - visited_nodes.add(node) - - self._walk_graph(node.child_nodes, visited_nodes) - - for listener in self._listeners: - listener.on_node_exit(node) - - -class PackGraphWalker(GraphWalkerListener): - """Pack a list of GraphNode - - Allocate a serialized id (sid) for every node and build an adjacency list, - suitable for graph unpacking. 
- """ - def __init__(self, adjacency_list, nodes_dict): - super(PackGraphWalker, self).__init__() - self._sid_counter = 0 - self._node_to_sid = {} - self._adjacency_list = adjacency_list - self._sid_to_node = nodes_dict - - def on_node_enter(self, node, already_visited): - pass - - def on_node_exit(self, node): - def key_serialize(key): - return hex(key) - - if node not in self._node_to_sid: - node_sid = self._sid_counter - self._sid_counter += 1 - self._node_to_sid[node] = node_sid - self._sid_to_node[key_serialize(node_sid)] = node.value - - if len(node.child_nodes) > 0: - children_sids = map(lambda node: - key_serialize(self._node_to_sid[node]), - node.child_nodes) - self._adjacency_list.append( - (key_serialize(node_sid), tuple(children_sids)) - ) - - -def pack_graph(start_nodes): - """Return a PackedGraph from a list of GraphNodes - - Packs a graph into a flat PackedGraph (nodes dictionary, adjacency list). - """ - walker = GraphWalker() - nodes_dict = {} - adjacency_list = [] - packer = PackGraphWalker(adjacency_list, nodes_dict) - walker.register_listener(packer) - walker.walk_graph(start_nodes) - return PackedGraph(nodes_dict, tuple(adjacency_list)) - - -def unpack_graph(packed_graph): - """Return a list of GraphNodes from a PackedGraph - - Unpacks a PackedGraph, which must have the property: each parent node in - the adjacency list appears after its children. - """ - (nodes, adjacency_list) = packed_graph - nodes_dict = dict(nodes) - graph_nodes_dict = {} - - for (parent_sid, children_sids) in adjacency_list: - if parent_sid in graph_nodes_dict: - raise exception.InvalidInput( - reason=_("PackedGraph adjacency list must be topologically " - "ordered")) - children = [] - for child_sid in children_sids: - if child_sid not in graph_nodes_dict: - graph_nodes_dict[child_sid] = GraphNode( - nodes_dict[child_sid], ()) - children.append(graph_nodes_dict[child_sid]) - nodes_dict.pop(child_sid, None) - graph_nodes_dict[parent_sid] = GraphNode(nodes_dict[parent_sid], - tuple(children)) - - result_nodes = [] - for sid in nodes_dict: - if sid not in graph_nodes_dict: - graph_nodes_dict[sid] = GraphNode(nodes_dict[sid], ()) - result_nodes.append(graph_nodes_dict[sid]) - return result_nodes - - -def serialize_resource_graph(resource_graph): - packed_resource_graph = pack_graph(resource_graph) - return jsonutils.dumps( - packed_resource_graph, - default=lambda r: (r.type, r.id, r.name, r.extra_info)) - - -def deserialize_resource_graph(serialized_resource_graph): - deserialized_graph = jsonutils.loads(serialized_resource_graph) - packed_resource_graph = PackedGraph(nodes=deserialized_graph[0], - adjacency=deserialized_graph[1]) - for sid, node in packed_resource_graph.nodes.items(): - packed_resource_graph.nodes[sid] = Resource(type=node[0], - id=node[1], - name=node[2], - extra_info=node[3]) - resource_graph = unpack_graph(packed_resource_graph) - return resource_graph diff --git a/karbor/services/protection/manager.py b/karbor/services/protection/manager.py deleted file mode 100644 index 19e22b45..00000000 --- a/karbor/services/protection/manager.py +++ /dev/null @@ -1,572 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Protection Service -""" - -from datetime import datetime -from eventlet import greenpool -from eventlet import greenthread -import six - -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging - -from oslo_utils import uuidutils - -from karbor.common import constants -from karbor import exception -from karbor.i18n import _ -from karbor import manager -from karbor.resource import Resource -from karbor.services.protection.flows import worker as flow_manager -from karbor.services.protection.protectable_registry import ProtectableRegistry -from karbor import utils - -LOG = logging.getLogger(__name__) - -protection_manager_opts = [ - cfg.StrOpt('provider_registry', - default='provider-registry', - help='the provider registry'), - cfg.IntOpt('max_concurrent_operations', - default=0, - help='number of maximum concurrent operation (protect, restore,' - ' delete) flows. 0 means no hard limit' - ) -] - -CONF = cfg.CONF -CONF.register_opts(protection_manager_opts) - -PROVIDER_NAMESPACE = 'karbor.provider' - - -class ProtectionManager(manager.Manager): - """karbor Protection Manager.""" - - RPC_API_VERSION = '1.0' - - target = messaging.Target(version=RPC_API_VERSION) - - def __init__(self, service_name=None, - *args, **kwargs): - super(ProtectionManager, self).__init__(*args, **kwargs) - provider_reg = CONF.provider_registry - self.provider_registry = utils.load_plugin(PROVIDER_NAMESPACE, - provider_reg) - self.protectable_registry = ProtectableRegistry() - self.protectable_registry.load_plugins() - self.worker = flow_manager.Worker() - self._greenpool = None - self._greenpool_size = CONF.max_concurrent_operations - if self._greenpool_size != 0: - self._greenpool = greenpool.GreenPool(self._greenpool_size) - - def _spawn(self, func, *args, **kwargs): - if self._greenpool is not None: - return self._greenpool.spawn_n(func, *args, **kwargs) - else: - return greenthread.spawn_n(func, *args, **kwargs) - - def init_host(self, **kwargs): - """Handle initialization if this is a standalone service""" - # TODO(wangliuan) - LOG.info("Starting protection service") - - @messaging.expected_exceptions(exception.InvalidPlan, - exception.ProviderNotFound, - exception.FlowError) - def protect(self, context, plan, checkpoint_properties=None): - """create protection for the given plan - - :param plan: Define that protection plan should be done - """ - - LOG.info("Starting protection service:protect action") - LOG.debug("protecting: %s checkpoint_properties:%s", - plan, checkpoint_properties) - - if not plan: - raise exception.InvalidPlan( - reason=_('the protection plan is None')) - provider_id = plan.get('provider_id', None) - plan_id = plan.get('id', None) - provider = self.provider_registry.show_provider(provider_id) - checkpoint_collection = provider.get_checkpoint_collection() - try: - checkpoint = checkpoint_collection.create(plan, - checkpoint_properties, - context=context) - except Exception as e: - LOG.exception("Failed to create checkpoint, plan: %s", plan_id) - exc = exception.FlowError(flow="protect", - error="Error creating checkpoint") - 
six.raise_from(exc, e) - try: - flow = self.worker.get_flow( - context=context, - protectable_registry=self.protectable_registry, - operation_type=constants.OPERATION_PROTECT, - plan=plan, - provider=provider, - checkpoint=checkpoint) - except Exception as e: - LOG.exception("Failed to create protection flow, plan: %s", - plan_id) - raise exception.FlowError( - flow="protect", - error=e.msg if hasattr(e, 'msg') else 'Internal error') - self._spawn(self.worker.run_flow, flow) - return checkpoint.id - - @messaging.expected_exceptions(exception.InvalidPlan, - exception.ProviderNotFound, - exception.FlowError) - def copy(self, context, plan): - """create copy of checkpoint for the given plan - - :param plan: Define that protection plan should be done - """ - - LOG.info("Starting protection service:copy action.") - LOG.debug("Creating the checkpoint copy for the plan: %s", plan) - - if not plan: - raise exception.InvalidPlan( - reason=_('The protection plan is None')) - provider_id = plan.get('provider_id', None) - plan_id = plan.get('id', None) - provider = self.provider_registry.show_provider(provider_id) - checkpoints = None - checkpoint_collection = provider.get_checkpoint_collection() - try: - checkpoints = self.list_checkpoints(context, provider_id, - filters={'plan_id': plan_id}) - except Exception as e: - LOG.exception("Failed to get checkpoints for the plan: %s", - plan_id) - exc = exception.FlowError(flow="copy", - error="Failed to get checkpoints") - six.raise_from(exc, e) - try: - flow, checkpoint_copy = self.worker.get_flow( - context=context, - protectable_registry=self.protectable_registry, - operation_type=constants.OPERATION_COPY, - plan=plan, - provider=provider, - checkpoint=checkpoints, - checkpoint_collection=checkpoint_collection) - except Exception as e: - LOG.exception("Failed to create copy flow, plan: %s", - plan_id) - raise exception.FlowError( - flow="copy", - error=e.msg if hasattr(e, 'msg') else 'Internal error') - self._spawn(self.worker.run_flow, flow) - return checkpoint_copy - - @messaging.expected_exceptions(exception.ProviderNotFound, - exception.CheckpointNotFound, - exception.CheckpointNotAvailable, - exception.FlowError, - exception.InvalidInput, - exception.AccessCheckpointNotAllowed) - def restore(self, context, restore, restore_auth): - LOG.info("Starting restore service:restore action") - - checkpoint_id = restore["checkpoint_id"] - provider_id = restore["provider_id"] - provider = self.provider_registry.show_provider(provider_id) - if not provider: - raise exception.ProviderNotFound(provider_id=provider_id) - - self.validate_restore_parameters(restore, provider) - - checkpoint_collection = provider.get_checkpoint_collection() - checkpoint = checkpoint_collection.get(checkpoint_id) - - if not context.is_admin and ( - checkpoint.project_id != context.project_id): - raise exception.AccessCheckpointNotAllowed( - checkpoint_id=checkpoint_id) - - if checkpoint.status != constants.CHECKPOINT_STATUS_AVAILABLE: - raise exception.CheckpointNotAvailable( - checkpoint_id=checkpoint_id) - - try: - flow = self.worker.get_flow( - context=context, - operation_type=constants.OPERATION_RESTORE, - checkpoint=checkpoint, - provider=provider, - restore=restore, - restore_auth=restore_auth) - except Exception: - LOG.exception("Failed to create restore flow checkpoint: %s", - checkpoint_id) - raise exception.FlowError( - flow="restore", - error=_("Failed to create flow")) - self._spawn(self.worker.run_flow, flow) - - 
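# Compact restatement (hypothetical helper, not present in this module)
# of the ownership check applied inline by restore() above and repeated
# by delete(), reset_state() and show_checkpoint() below:
def _is_allowed(context, checkpoint_project_id):
    # Admins may act on any checkpoint; other callers only on
    # checkpoints owned by their own project.
    return context.is_admin or checkpoint_project_id == context.project_id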
@messaging.expected_exceptions(exception.ProviderNotFound, - exception.CheckpointNotFound, - exception.CheckpointNotAvailable, - exception.FlowError, - exception.InvalidInput) - def verification(self, context, verification): - LOG.info("Starting verify service:verify action") - - checkpoint_id = verification["checkpoint_id"] - provider_id = verification["provider_id"] - provider = self.provider_registry.show_provider(provider_id) - if not provider: - raise exception.ProviderNotFound(provider_id=provider_id) - - self.validate_verify_parameters(verification, provider) - - checkpoint_collection = provider.get_checkpoint_collection() - checkpoint = checkpoint_collection.get(checkpoint_id) - - if checkpoint.status != constants.CHECKPOINT_STATUS_AVAILABLE: - raise exception.CheckpointNotAvailable( - checkpoint_id=checkpoint_id) - - try: - flow = self.worker.get_flow( - context=context, - operation_type=constants.OPERATION_VERIFY, - checkpoint=checkpoint, - provider=provider, - verify=verification) - except Exception: - LOG.exception("Failed to create verify flow checkpoint: %s", - checkpoint_id) - raise exception.FlowError( - flow="verify", - error=_("Failed to create flow")) - self._spawn(self.worker.run_flow, flow) - - def validate_restore_parameters(self, restore, provider): - parameters = restore["parameters"] - if not parameters: - return - restore_schema = provider.extended_info_schema.get( - "restore_schema", None) - if restore_schema is None: - msg = _("The restore schema of plugin must be provided.") - raise exception.InvalidInput(reason=msg) - for resource_key, parameter_value in parameters.items(): - if "#" in resource_key: - resource_type, resource_id = resource_key.split("#") - if not uuidutils.is_uuid_like(resource_id): - msg = _("The resource_id must be a uuid.") - raise exception.InvalidInput(reason=msg) - else: - resource_type = resource_key - if (resource_type not in constants.RESOURCE_TYPES) or ( - resource_type not in restore_schema): - msg = _("The key of restore parameters is invalid.") - raise exception.InvalidInput(reason=msg) - properties = restore_schema[resource_type]["properties"] - if not set(parameter_value.keys()).issubset( - set(properties.keys())): - msg = _("The restore property of restore parameters " - "is invalid.") - raise exception.InvalidInput(reason=msg) - - def validate_verify_parameters(self, verify, provider): - parameters = verify["parameters"] - if not parameters: - return - verify_schema = provider.extended_info_schema.get( - "verify_schema", None) - if verify_schema is None: - msg = _("The verify schema of plugin must be provided.") - raise exception.InvalidInput(reason=msg) - for resource_key, parameter_value in parameters.items(): - if "#" in resource_key: - resource_type, resource_id = resource_key.split("#") - if not uuidutils.is_uuid_like(resource_id): - msg = _("The resource_id must be a uuid.") - raise exception.InvalidInput(reason=msg) - else: - resource_type = resource_key - if (resource_type not in constants.RESOURCE_TYPES) or ( - resource_type not in verify_schema): - msg = _("The key of verify parameters is invalid.") - raise exception.InvalidInput(reason=msg) - properties = verify_schema[resource_type]["properties"] - if not set(parameter_value.keys()).issubset( - set(properties.keys())): - msg = _("The verify property of verify parameters " - "is invalid.") - raise exception.InvalidInput(reason=msg) - - @messaging.expected_exceptions(exception.DeleteCheckpointNotAllowed) - def delete(self, context, provider_id, checkpoint_id): - 
LOG.info("Starting protection service:delete action") - LOG.debug('provider_id :%s checkpoint_id:%s', provider_id, - checkpoint_id) - provider = self.provider_registry.show_provider(provider_id) - try: - checkpoint_collection = provider.get_checkpoint_collection() - checkpoint = checkpoint_collection.get(checkpoint_id, - context=context) - except Exception: - LOG.error("get checkpoint failed, checkpoint_id:%s", - checkpoint_id) - raise exception.InvalidInput( - reason=_("Invalid checkpoint_id or provider_id")) - - checkpoint_dict = checkpoint.to_dict() - if not context.is_admin and ( - context.project_id != checkpoint_dict['project_id']): - LOG.warning("Delete checkpoint(%s) is not allowed.", - checkpoint_id) - raise exception.DeleteCheckpointNotAllowed( - checkpoint_id=checkpoint_id) - - if checkpoint.status not in [ - constants.CHECKPOINT_STATUS_AVAILABLE, - constants.CHECKPOINT_STATUS_ERROR, - ]: - raise exception.CheckpointNotBeDeleted( - checkpoint_id=checkpoint_id) - checkpoint.status = constants.CHECKPOINT_STATUS_DELETING - checkpoint.commit() - - try: - flow = self.worker.get_flow( - context=context, - operation_type=constants.OPERATION_DELETE, - checkpoint=checkpoint, - provider=provider) - except Exception: - LOG.exception("Failed to create delete checkpoint flow," - "checkpoint:%s.", checkpoint_id) - raise exception.KarborException(_( - "Failed to create delete checkpoint flow." - )) - self._spawn(self.worker.run_flow, flow) - - @messaging.expected_exceptions(exception.AccessCheckpointNotAllowed, - exception.CheckpointNotBeReset) - def reset_state(self, context, provider_id, checkpoint_id, state): - provider = self.provider_registry.show_provider(provider_id) - - checkpoint = provider.get_checkpoint(checkpoint_id, context=context) - checkpoint_dict = checkpoint.to_dict() - if not context.is_admin and ( - context.project_id != checkpoint_dict['project_id']): - raise exception.AccessCheckpointNotAllowed( - checkpoint_id=checkpoint_id) - - if checkpoint.status not in [ - constants.CHECKPOINT_STATUS_AVAILABLE, - constants.CHECKPOINT_STATUS_ERROR, - constants.CHECKPOINT_STATUS_COPYING, - constants.CHECKPOINT_STATUS_WAIT_COPYING, - constants.CHECKPOINT_STATUS_COPY_FINISHED - ]: - raise exception.CheckpointNotBeReset( - checkpoint_id=checkpoint_id) - checkpoint.status = state - checkpoint.commit() - - def start(self, plan): - # TODO(wangliuan) - pass - - def suspend(self, plan): - # TODO(wangliuan) - pass - - @messaging.expected_exceptions(exception.ProviderNotFound, - exception.CheckpointNotFound, - exception.BankListObjectsFailed) - def list_checkpoints(self, context, provider_id, marker=None, limit=None, - sort_keys=None, sort_dirs=None, filters=None, - all_tenants=False): - LOG.info("Starting list checkpoints. 
provider_id:%s", provider_id) - plan_id = filters.get("plan_id", None) - start_date = None - end_date = None - if filters.get("start_date", None): - start_date = datetime.strptime( - filters.get("start_date"), "%Y-%m-%d") - if filters.get("end_date", None): - end_date = datetime.strptime( - filters.get("end_date"), "%Y-%m-%d") - - sort_dir = None if sort_dirs is None else sort_dirs[0] - provider = self.provider_registry.show_provider(provider_id) - - if filters.get('project_id', None) and all_tenants: - project_id = filters.get('project_id') - all_tenants = False - else: - project_id = context.project_id - - checkpoint_ids = provider.list_checkpoints( - project_id, provider_id, limit=limit, marker=marker, - plan_id=plan_id, start_date=start_date, end_date=end_date, - sort_dir=sort_dir, context=context, all_tenants=all_tenants) - checkpoints = [] - for checkpoint_id in checkpoint_ids: - checkpoint = provider.get_checkpoint(checkpoint_id, - context=context) - checkpoints.append(checkpoint.to_dict()) - return checkpoints - - @messaging.expected_exceptions(exception.ProviderNotFound, - exception.CheckpointNotFound, - exception.AccessCheckpointNotAllowed) - def show_checkpoint(self, context, provider_id, checkpoint_id): - provider = self.provider_registry.show_provider(provider_id) - - checkpoint = provider.get_checkpoint(checkpoint_id, context=context) - checkpoint_dict = checkpoint.to_dict() - if not context.is_admin and ( - context.project_id != checkpoint_dict['project_id']): - raise exception.AccessCheckpointNotAllowed( - checkpoint_id=checkpoint_id) - return checkpoint_dict - - def list_protectable_types(self, context): - LOG.info("Start to list protectable types.") - return self.protectable_registry.list_resource_types() - - @messaging.expected_exceptions(exception.ProtectableTypeNotFound) - def show_protectable_type(self, context, protectable_type): - LOG.info("Start to show protectable type %s", protectable_type) - - plugin = self.protectable_registry.get_protectable_resource_plugin( - protectable_type) - if not plugin: - raise exception.ProtectableTypeNotFound( - protectable_type=protectable_type) - - dependents = [] - for t in self.protectable_registry.list_resource_types(): - if t == protectable_type: - continue - - p = self.protectable_registry.get_protectable_resource_plugin(t) - if p and protectable_type in p.get_parent_resource_types(): - dependents.append(t) - - return { - 'name': plugin.get_resource_type(), - "dependent_types": dependents - } - - @messaging.expected_exceptions(exception.ListProtectableResourceFailed) - def list_protectable_instances(self, context, - protectable_type=None, - marker=None, - limit=None, - sort_keys=None, - sort_dirs=None, - filters=None, - parameters=None): - - LOG.info("Start to list protectable instances of type: %s", - protectable_type) - - try: - resource_instances = self.protectable_registry.list_resources( - context, protectable_type, parameters) - except exception.ListProtectableResourceFailed as err: - LOG.error("List resources of type %(type)s failed: %(err)s", - {'type': protectable_type, 'err': six.text_type(err)}) - raise - - result = [] - for resource in resource_instances: - result.append(dict(id=resource.id, name=resource.name, - extra_info=resource.extra_info)) - - return result - - @messaging.expected_exceptions(exception.ListProtectableResourceFailed) - def show_protectable_instance(self, context, protectable_type, - protectable_id, parameters=None): - LOG.info("Start to show protectable instance of type: %s", - 
protectable_type) - - registry = self.protectable_registry - try: - resource_instance = registry.show_resource( - context, - protectable_type, - protectable_id, - parameters=parameters - ) - except exception.ListProtectableResourceFailed as err: - LOG.error("Show resources of type %(type)s id %(id)s " - "failed: %(err)s", - {'type': protectable_type, - 'id': protectable_id, - 'err': six.text_type(err)}) - raise - - return resource_instance.to_dict() if resource_instance else None - - @messaging.expected_exceptions(exception.ListProtectableResourceFailed) - def list_protectable_dependents(self, context, - protectable_id, - protectable_type, - protectable_name): - LOG.info("Start to list dependents of resource (type:%(type)s, " - "id:%(id)s, name:%(name)s)", - {'type': protectable_type, - 'id': protectable_id, - 'name': protectable_name}) - - parent_resource = Resource(type=protectable_type, id=protectable_id, - name=protectable_name) - - registry = self.protectable_registry - try: - dependent_resources = registry.fetch_dependent_resources( - context, parent_resource) - except exception.ListProtectableResourceFailed as err: - LOG.error("List dependent resources of (%(res)s) failed: %(err)s", - {'res': parent_resource, - 'err': six.text_type(err)}) - raise - - return [resource.to_dict() for resource in dependent_resources] - - def list_providers(self, context, marker=None, limit=None, - sort_keys=None, sort_dirs=None, filters=None): - return self.provider_registry.list_providers(marker=marker, - limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters) - - @messaging.expected_exceptions(exception.ProviderNotFound) - def show_provider(self, context, provider_id): - provider = self.provider_registry.show_provider(provider_id) - response = {'id': provider.id, - 'name': provider.name, - 'description': provider.description, - 'extended_info_schema': provider.extended_info_schema, - } - return response diff --git a/karbor/services/protection/protectable_plugin.py b/karbor/services/protection/protectable_plugin.py deleted file mode 100644 index 54bffffb..00000000 --- a/karbor/services/protection/protectable_plugin.py +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class ProtectablePlugin(object): - """Base abstract class for protectable plugin. - - """ - - def __init__(self, context=None, conf=None): - super(ProtectablePlugin, self).__init__() - self._context = context - self._conf = conf - - def instance(self, context=None, conf=None): - return self.__class__(context, conf) - - @abc.abstractmethod - def get_resource_type(self): - """Return the resource type that this plugin supports. - - Subclasses can implement as a classmethod - """ - pass - - @abc.abstractmethod - def get_parent_resource_types(self): - """Return the possible parent resource types. 
- - Subclasses can implement as a classmethod - """ - pass - - @abc.abstractmethod - def list_resources(self, context, parameters=None): - """List resource instances of type this plugin supported. - - :return: The list of resource instance. - """ - pass - - @abc.abstractmethod - def show_resource(self, context, resource_id, parameters=None): - """Show resource detail information. - - """ - pass - - @abc.abstractmethod - def get_dependent_resources(self, context, parent_resource): - """List dependent resource instances. - - The listed resource instances are of type this plugin supported, - and dependent by the given parent resource. - - :param parent_resource: the parent resource instance. - :type parent_resource: one of parent resource types. - :return: the list of dependent resource instances. - """ - pass diff --git a/karbor/services/protection/protectable_plugins/__init__.py b/karbor/services/protection/protectable_plugins/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/protectable_plugins/database.py b/karbor/services/protection/protectable_plugins/database.py deleted file mode 100644 index ceb5ef05..00000000 --- a/karbor/services/protection/protectable_plugins/database.py +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
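# Minimal sketch of a concrete ProtectablePlugin honouring the abstract
# interface above. The resource-type strings are invented placeholders;
# the real plugins below take theirs from karbor.common.constants.
class NoopProtectablePlugin(ProtectablePlugin):
    def get_resource_type(self):
        return "OS::Example::Noop"

    def get_parent_resource_types(self):
        return ("OS::Example::Project",)

    def list_resources(self, context, parameters=None):
        return []  # no instances of this type exist

    def show_resource(self, context, resource_id, parameters=None):
        return None

    def get_dependent_resources(self, context, parent_resource):
        return []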
- -import six - -from karbor.common import constants -from karbor import exception -from karbor import resource -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protectable_plugin -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -INVALID_INSTANCE_STATUS = ['BUILD', 'REBOOT', 'RESIZE', 'ERROR'] - - -class DatabaseInstanceProtectablePlugin(protectable_plugin.ProtectablePlugin): - """Trove database instances protectable plugin""" - - _SUPPORT_RESOURCE_TYPE = constants.DATABASE_RESOURCE_TYPE - - def _client(self, context): - self._client_instance = ClientFactory.create_client( - "trove", - context) - - return self._client_instance - - def get_resource_type(self): - return self._SUPPORT_RESOURCE_TYPE - - def get_parent_resource_types(self): - return (constants.PROJECT_RESOURCE_TYPE, ) - - def list_resources(self, context, parameters=None): - try: - instances = self._client(context).instances.list() - except Exception as e: - LOG.exception("List all database instances from trove failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=instance.id, name=instance.name) - for instance in instances - if instance.status not in INVALID_INSTANCE_STATUS] - - def show_resource(self, context, resource_id, parameters=None): - try: - instance = self._client(context).instances.get(resource_id) - except Exception as e: - LOG.exception("Show a database instance from trove failed.") - raise exception.ProtectableResourceNotFound( - id=resource_id, - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - if instance.status in INVALID_INSTANCE_STATUS: - raise exception.ProtectableResourceInvalidStatus( - id=resource_id, type=self._SUPPORT_RESOURCE_TYPE, - status=instance.status) - return resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=instance.id, name=instance.name) - - def get_dependent_resources(self, context, parent_resource): - try: - instances = self._client(context).instances.list() - except Exception as e: - LOG.exception("List all database instances from trove failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=instance.id, - name=instance.name) - for instance in instances - if instance.project_id == parent_resource.id - and instance.status not in INVALID_INSTANCE_STATUS] diff --git a/karbor/services/protection/protectable_plugins/image.py b/karbor/services/protection/protectable_plugins/image.py deleted file mode 100644 index fd23de74..00000000 --- a/karbor/services/protection/protectable_plugins/image.py +++ /dev/null @@ -1,132 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
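# Hypothetical driver snippet showing the call order the trove plugin
# above implements (the registry wiring that actually performs these
# calls lives elsewhere in the tree; ctx stands for a request context):
plugin = DatabaseInstanceProtectablePlugin(context=ctx, conf=None)
for res in plugin.list_resources(ctx):           # skips BUILD/REBOOT/...
    detail = plugin.show_resource(ctx, res.id)   # raises on bad status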
- -import six - -from karbor.common import constants -from karbor import exception -from karbor import resource -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protectable_plugin -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -INVALID_IMAGE_STATUS = ['killed', 'deleted', 'pending_delete', - 'deactivated'] - - -class ImageProtectablePlugin(protectable_plugin.ProtectablePlugin): - """Glance image protectable plugin""" - _SUPPORT_RESOURCE_TYPE = constants.IMAGE_RESOURCE_TYPE - - def _glance_client(self, context): - self._glance_client_instance = ClientFactory.create_client( - 'glance', context) - return self._glance_client_instance - - def _nova_client(self, context): - self._nova_client_instance = ClientFactory.create_client( - 'nova', context) - return self._nova_client_instance - - def get_resource_type(self): - return self._SUPPORT_RESOURCE_TYPE - - def get_parent_resource_types(self): - return (constants.SERVER_RESOURCE_TYPE, - constants.PROJECT_RESOURCE_TYPE,) - - def list_resources(self, context, parameters=None): - try: - images = self._glance_client(context).images.list() - except Exception as e: - LOG.exception("List all images from glance failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=image.id, name=image.name) - for image in images - if image.status not in INVALID_IMAGE_STATUS] - - def _get_dependent_resources_by_server(self, - context, - parent_resource): - try: - server = self._nova_client(context).servers.get(parent_resource.id) - except Exception as e: - LOG.exception("List all server from nova failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - - if not server.image: - return [] - try: - image = self._glance_client(context).images.get(server.image['id']) - except Exception as e: - LOG.exception("Getting image from glance failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=server.image['id'], - name=image.name, - extra_info={'server_id': server.id})] - - def _get_dependent_resources_by_project(self, - context, - parent_resource): - try: - images = self._glance_client(context).images.list() - except Exception as e: - LOG.exception("List all images from glance failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=image.id, - name=image.name) - for image in images - if image.owner == parent_resource.id - and image.status not in INVALID_IMAGE_STATUS] - - def show_resource(self, context, resource_id, parameters=None): - try: - image = self._glance_client(context).images.get(resource_id) - except Exception as e: - LOG.exception("Show a image from glance failed.") - raise exception.ProtectableResourceNotFound( - id=resource_id, - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - if image.status in INVALID_IMAGE_STATUS: - raise exception.ProtectableResourceInvalidStatus( - id=image.id, type=self._SUPPORT_RESOURCE_TYPE, - status=image.status) - return resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=image.id, name=image.name) - - def get_dependent_resources(self, context, 
parent_resource): - if parent_resource.type == constants.SERVER_RESOURCE_TYPE: - return self._get_dependent_resources_by_server(context, - parent_resource) - - if parent_resource.type == constants.PROJECT_RESOURCE_TYPE: - return self._get_dependent_resources_by_project(context, - parent_resource) - - return [] diff --git a/karbor/services/protection/protectable_plugins/network.py b/karbor/services/protection/protectable_plugins/network.py deleted file mode 100644 index 6661ecd8..00000000 --- a/karbor/services/protection/protectable_plugins/network.py +++ /dev/null @@ -1,117 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - -from karbor.common import constants -from karbor import exception -from karbor import resource -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protectable_plugin -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - - -class NetworkProtectablePlugin(protectable_plugin.ProtectablePlugin): - """Protectable plugin implementation for Network from Neutron. - - """ - - _SUPPORT_RESOURCE_TYPE = constants.NETWORK_RESOURCE_TYPE - - def _neutron_client(self, cntxt): - return ClientFactory.create_client('neutron', cntxt) - - def _nova_client(self, cntxt): - return ClientFactory.create_client('nova', cntxt) - - def get_resource_type(self): - return self._SUPPORT_RESOURCE_TYPE - - def get_parent_resource_types(self): - return (constants.PROJECT_RESOURCE_TYPE, ) - - def _get_network_id(self): - """Use the project_id as the network_id - - Because this plugin protects a project's ports, networks, - subnets, routers and security groups as one bundle, the id - identifies the whole topology rather than a single Neutron - resource, unlike the server plugin, whose resources carry - the real server id.
- """ - - network_id = self._context.project_id - return network_id - - def list_resources(self, context, parameters=None): - try: - netclient = self._neutron_client(context) - networks = netclient.list_networks( - project_id=context.project_id).get('networks') - except Exception as e: - LOG.exception("List all summary networks from neutron failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - if networks: - return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=self._get_network_id(), - name="Network Topology")] - return [] - - def show_resource(self, context, resource_id, parameters=None): - try: - if resource_id != self._get_network_id(): - return None - - netclient = self._neutron_client(context) - networks = netclient.list_networks( - project_id=resource_id).get('networks') - except Exception as e: - LOG.exception("List all summary networks from neutron failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - if networks: - return resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=self._get_network_id(), - name="Network Topology") - return None - - def _get_dependent_resources_by_project(self, - context, - parent_resource): - try: - project_id = parent_resource.id - netclient = self._neutron_client(context) - networks = netclient.list_networks( - project_id=project_id).get('networks') - - if networks: - return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=self._get_network_id(), - name="Network Topology")] - else: - return [] - - except Exception as e: - LOG.exception("List all summary networks from neutron failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - - def get_dependent_resources(self, context, parent_resource): - return self._get_dependent_resources_by_project( - context, parent_resource) diff --git a/karbor/services/protection/protectable_plugins/pod.py b/karbor/services/protection/protectable_plugins/pod.py deleted file mode 100644 index d7af239b..00000000 --- a/karbor/services/protection/protectable_plugins/pod.py +++ /dev/null @@ -1,115 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import six -import uuid - -from karbor.common import constants -from karbor import exception -from karbor import resource -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protectable_plugin -from oslo_config import cfg -from oslo_log import log as logging - - -LOG = logging.getLogger(__name__) - -pod_protectable_opts = [ - cfg.StrOpt('namespace', - default='default', - help='The namespace name that the kubernetes client uses.') -] - - -def register_opts(conf): - conf.register_opts(pod_protectable_opts, group='pod_protectable') - - -INVALID_POD_STATUS = ['Pending', 'Failed', 'Unknown'] - - -class K8sPodProtectablePlugin(protectable_plugin.ProtectablePlugin): - """K8s pod protectable plugin""" - - _SUPPORT_RESOURCE_TYPE = constants.POD_RESOURCE_TYPE - - def __init__(self, context=None, config=None): - super(K8sPodProtectablePlugin, self).__init__(context, config) - self.namespace = None - if self._conf: - register_opts(self._conf) - plugin_cfg = self._conf.pod_protectable - self.namespace = plugin_cfg.namespace - - def _client(self, context): - self._client_instance = ClientFactory.create_client( - "k8s", context) - - return self._client_instance - - def get_resource_type(self): - return self._SUPPORT_RESOURCE_TYPE - - def get_parent_resource_types(self): - return (constants.PROJECT_RESOURCE_TYPE, ) - - def list_resources(self, context, parameters=None): - try: - pods = self._client(context).list_namespaced_pod(self.namespace) - except Exception as e: - LOG.exception("List all summary pods from kubernetes failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - return [resource.Resource( - type=self._SUPPORT_RESOURCE_TYPE, - id=uuid.uuid5(uuid.NAMESPACE_OID, "%s:%s" % ( - self.namespace, pod.metadata.name)), - name="%s:%s" % (self.namespace, pod.metadata.name), - extra_info={'namespace': self.namespace}) - for pod in pods.items - if pod.status.phase not in INVALID_POD_STATUS] - - def show_resource(self, context, resource_id, parameters=None): - try: - if not parameters or not parameters.get("name"): - raise exception.InvalidParameterValue( - err="the pod name must be provided in parameters") - name = parameters["name"] - if ":" in name: - pod_namespace, pod_name = name.split(":") - else: - pod_namespace = self.namespace - pod_name = name - pod = self._client(context).read_namespaced_pod( - pod_name, pod_namespace) - except Exception as e: - LOG.exception("Show a summary pod from kubernetes failed.") - raise exception.ProtectableResourceNotFound( - id=resource_id, - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - if pod.status.phase in INVALID_POD_STATUS: - raise exception.ProtectableResourceInvalidStatus( - id=resource_id, type=self._SUPPORT_RESOURCE_TYPE, - status=pod.status.phase) - return resource.Resource( - type=self._SUPPORT_RESOURCE_TYPE, - id=uuid.uuid5(uuid.NAMESPACE_OID, "%s:%s" % ( - pod_namespace, pod.metadata.name)), - name="%s:%s" % (pod_namespace, pod.metadata.name), - extra_info={'namespace': pod_namespace}) - - def get_dependent_resources(self, context, parent_resource): - return self.list_resources(context) diff --git a/karbor/services/protection/protectable_plugins/project.py b/karbor/services/protection/protectable_plugins/project.py deleted file mode 100644 index 89715c57..00000000 --- a/karbor/services/protection/protectable_plugins/project.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from karbor.common import constants -from karbor import resource -from karbor.services.protection import protectable_plugin - - -class ProjectProtectablePlugin(protectable_plugin.ProtectablePlugin): - """Keystone project protectable plugin""" - _SUPPORT_RESOURCE_TYPE = constants.PROJECT_RESOURCE_TYPE - - def get_resource_type(self): - return self._SUPPORT_RESOURCE_TYPE - - def get_parent_resource_types(self): - return () - - def list_resources(self, context, parameters=None): - # TODO(yuvalbr) handle admin context for multiple projects? - return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=context.project_id, - name=context.project_name)] - - def get_dependent_resources(self, context, parent_resource): - pass - - def show_resource(self, context, resource_id, parameters=None): - # TODO(yinwei) get project name through keystone client - return resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=resource_id, - name=context.project_name) diff --git a/karbor/services/protection/protectable_plugins/server.py b/karbor/services/protection/protectable_plugins/server.py deleted file mode 100644 index 97c5153a..00000000 --- a/karbor/services/protection/protectable_plugins/server.py +++ /dev/null @@ -1,82 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
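Project is the only protectable with no parent types, which makes it the natural start node when the registry (ProtectableRegistry.build_graph, further down in this diff) expands the dependency graph. A toy walk under assumed semantics; Karbor's real build_graph lives in karbor.services.protection.graph and is not shown in this section:

def walk(start_nodes, get_children):
    # Expand every reachable resource exactly once, in the spirit of
    # build_graph(start_nodes=..., get_child_nodes_func=...).
    seen, order, stack = set(), [], list(start_nodes)
    while stack:
        node = stack.pop()
        if node in seen:
            continue
        seen.add(node)
        order.append(node)
        stack.extend(get_children(node))
    return order

# Hypothetical parent -> children table for one project.
children = {'project': ['server-1', 'volume-1'], 'server-1': ['volume-1']}
print(walk(['project'], lambda n: children.get(n, [])))
# ['project', 'volume-1', 'server-1'] -- each resource appears once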
- -import six - -from karbor.common import constants -from karbor import exception -from karbor import resource -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protectable_plugin -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -INVALID_SERVER_STATUS = [ - 'DELETED', 'ERROR', 'UNKNOWN', 'SOFT_DELETED', 'RESCUED'] - - -class ServerProtectablePlugin(protectable_plugin.ProtectablePlugin): - """Nova server protectable plugin""" - - _SUPPORT_RESOURCE_TYPE = constants.SERVER_RESOURCE_TYPE - - def _client(self, context): - self._client_instance = ClientFactory.create_client( - "nova", - context) - - return self._client_instance - - def get_resource_type(self): - return self._SUPPORT_RESOURCE_TYPE - - def get_parent_resource_types(self): - return (constants.PROJECT_RESOURCE_TYPE, ) - - def list_resources(self, context, parameters=None): - try: - servers = self._client(context).servers.list(detailed=True) - except Exception as e: - LOG.exception("List all servers from nova failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=server.id, - name=server.name) - for server in servers - if server.status not in INVALID_SERVER_STATUS] - - def show_resource(self, context, resource_id, parameters=None): - try: - server = self._client(context).servers.get(resource_id) - except Exception as e: - LOG.exception("Show a server from nova failed.") - raise exception.ProtectableResourceNotFound( - id=resource_id, - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - if server.status in INVALID_SERVER_STATUS: - raise exception.ProtectableResourceInvalidStatus( - id=resource_id, type=self._SUPPORT_RESOURCE_TYPE, - status=server.status) - return resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=server.id, - name=server.name) - - def get_dependent_resources(self, context, parent_resource): - # Utilize list_resource here, cause its function is - # listing resources of given project - return self.list_resources(context) diff --git a/karbor/services/protection/protectable_plugins/share.py b/karbor/services/protection/protectable_plugins/share.py deleted file mode 100644 index 60a4bc2f..00000000 --- a/karbor/services/protection/protectable_plugins/share.py +++ /dev/null @@ -1,92 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
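show_resource in these plugins has three outcomes: the client lookup fails (mapped to ProtectableResourceNotFound), the resource exists but is in a blacklisted state (ProtectableResourceInvalidStatus), or a Resource is returned. A compact sketch of that decision, with plain exceptions standing in for the karbor.exception classes:

INVALID_SERVER_STATUS = [
    'DELETED', 'ERROR', 'UNKNOWN', 'SOFT_DELETED', 'RESCUED']

class NotFound(Exception):
    pass

class InvalidStatus(Exception):
    pass

def show_server(servers, server_id):
    # servers: dict of id -> status, a stand-in for the nova client.
    if server_id not in servers:
        raise NotFound(server_id)
    status = servers[server_id]
    if status in INVALID_SERVER_STATUS:
        raise InvalidStatus(status)
    return {'id': server_id, 'status': status}

print(show_server({'s1': 'ACTIVE'}, 's1'))  # {'id': 's1', 'status': 'ACTIVE'}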
- -import six - -from karbor.common import constants -from karbor import exception -from karbor import resource -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protectable_plugin -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -INVALID_SHARE_STATUS = ['deleting', 'deleted', 'error', 'error_deleting', - 'manage_error', 'unmanage_error', 'extending_error', - 'shrinking_error', 'reverting_error'] - - -class ShareProtectablePlugin(protectable_plugin.ProtectablePlugin): - """Manila share protectable plugin""" - - _SUPPORT_RESOURCE_TYPE = constants.SHARE_RESOURCE_TYPE - - def _client(self, context): - self._client_instance = ClientFactory.create_client( - "manila", - context) - - return self._client_instance - - def get_resource_type(self): - return self._SUPPORT_RESOURCE_TYPE - - def get_parent_resource_types(self): - return (constants.PROJECT_RESOURCE_TYPE, ) - - def list_resources(self, context, parameters=None): - try: - shares = self._client(context).shares.list(detailed=True) - except Exception as e: - LOG.exception("List all summary shares from manila failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=share.id, name=share.name) - for share in shares - if share.status not in INVALID_SHARE_STATUS] - - def show_resource(self, context, resource_id, parameters=None): - try: - share = self._client(context).shares.get(resource_id) - except Exception as e: - LOG.exception("Show a summary share from manila failed.") - raise exception.ProtectableResourceNotFound( - id=resource_id, - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - if share.status in INVALID_SHARE_STATUS: - raise exception.ProtectableResourceInvalidStatus( - id=resource_id, type=self._SUPPORT_RESOURCE_TYPE, - status=share.status) - return resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=share.id, name=share.name) - - def get_dependent_resources(self, context, parent_resource): - try: - shares = self._client(context).shares.list() - except Exception as e: - LOG.exception("List all shares from manila failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE, - id=share.id, - name=share.name) - for share in shares - if share.project_id == parent_resource.id - and share.status not in INVALID_SHARE_STATUS] diff --git a/karbor/services/protection/protectable_plugins/volume.py b/karbor/services/protection/protectable_plugins/volume.py deleted file mode 100644 index 6281a8b5..00000000 --- a/karbor/services/protection/protectable_plugins/volume.py +++ /dev/null @@ -1,169 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
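When shares hang off a project, get_dependent_resources has to apply two filters at once: ownership by the parent project and a usable status. A self-contained sketch (FakeShare is hypothetical; the status list is abbreviated from INVALID_SHARE_STATUS above):

from collections import namedtuple

FakeShare = namedtuple('FakeShare', ['id', 'project_id', 'status'])
INVALID_SHARE_STATUS = ['deleting', 'deleted', 'error', 'error_deleting']

def dependent_shares(shares, project_id):
    # Both conditions must hold, mirroring get_dependent_resources.
    return [s.id for s in shares
            if s.project_id == project_id
            and s.status not in INVALID_SHARE_STATUS]

shares = [FakeShare('s1', 'p1', 'available'),
          FakeShare('s2', 'p1', 'error'),      # right project, bad status
          FakeShare('s3', 'p2', 'available')]  # wrong project
assert dependent_shares(shares, 'p1') == ['s1']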
- -import six - -from karbor.common import constants -from karbor import exception -from karbor import resource -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protectable_plugin -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -INVALID_VOLUME_STATUS = ['error', 'deleting', 'error_deleting'] - - -class VolumeProtectablePlugin(protectable_plugin.ProtectablePlugin): - """Cinder volume protectable plugin""" - - _SUPPORT_RESOURCE_TYPE = constants.VOLUME_RESOURCE_TYPE - - def _client(self, context): - self._client_instance = ClientFactory.create_client( - "cinder", - context) - - return self._client_instance - - def _k8s_client(self, context): - self._k8s_client_instance = ClientFactory.create_client( - "k8s", context) - - return self._k8s_client_instance - - def get_resource_type(self): - return self._SUPPORT_RESOURCE_TYPE - - def get_parent_resource_types(self): - return (constants.SERVER_RESOURCE_TYPE, - constants.POD_RESOURCE_TYPE, - constants.PROJECT_RESOURCE_TYPE) - - def list_resources(self, context, parameters=None): - try: - volumes = self._client(context).volumes.list(detailed=True) - except Exception as e: - LOG.exception("List all summary volumes from cinder failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - return [resource.Resource( - type=self._SUPPORT_RESOURCE_TYPE, - id=vol.id, name=vol.name, - extra_info={'availability_zone': vol.availability_zone}) - for vol in volumes - if vol.status not in INVALID_VOLUME_STATUS] - - def show_resource(self, context, resource_id, parameters=None): - try: - volume = self._client(context).volumes.get(resource_id) - except Exception as e: - LOG.exception("Show a summary volume from cinder failed.") - raise exception.ProtectableResourceNotFound( - id=resource_id, - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - if volume.status in INVALID_VOLUME_STATUS: - raise exception.ProtectableResourceInvalidStatus( - id=resource_id, type=self._SUPPORT_RESOURCE_TYPE, - status=volume.status) - return resource.Resource( - type=self._SUPPORT_RESOURCE_TYPE, - id=volume.id, name=volume.name, - extra_info={'availability_zone': volume.availability_zone}) - - def _get_dependent_resources_by_pod(self, context, parent_resource): - try: - name = parent_resource.name - pod_namespace, pod_name = name.split(":") - pod = self._k8s_client(context).read_namespaced_pod( - pod_name, pod_namespace) - if not pod.spec.volumes: - return [] - mounted_vol_list = [] - for volume in pod.spec.volumes: - volume_pvc = volume.persistent_volume_claim - volume_cinder = volume.cinder - if volume_pvc: - pvc_name = volume_pvc.claim_name - pvc = self._k8s_client( - context).read_namespaced_persistent_volume_claim( - pvc_name, pod_namespace) - pv_name = pvc.spec.volume_name - if pv_name: - pv = self._k8s_client( - context).read_persistent_volume(pv_name) - if pv.spec.cinder: - mounted_vol_list.append( - pv.spec.cinder.volume_id) - elif volume_cinder: - mounted_vol_list.append( - volume_cinder.volume_id) - - except Exception as e: - LOG.exception("Get mounted volumes from kubernetes " - "pod failed.") - raise exception.ProtectableResourceNotFound( - id=parent_resource.id, - type=parent_resource.type, - reason=six.text_type(e)) - try: - volumes = self._client(context).volumes.list(detailed=True) - except Exception as e: - LOG.exception("List all detailed volumes from cinder failed.") - raise 
exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - return [resource.Resource( - type=self._SUPPORT_RESOURCE_TYPE, id=vol.id, name=vol.name, - extra_info={'availability_zone': vol.availability_zone}) - for vol in volumes if (vol.id in mounted_vol_list)] - - def _get_dependent_resources_by_server(self, context, parent_resource): - def _is_attached_to(vol): - if parent_resource.type == constants.SERVER_RESOURCE_TYPE: - return any([s.get('server_id') == parent_resource.id - for s in vol.attachments]) - if parent_resource.type == constants.PROJECT_RESOURCE_TYPE: - return getattr( - vol, - 'os-vol-tenant-attr:tenant_id' - ) == parent_resource.id - try: - volumes = self._client(context).volumes.list(detailed=True) - except Exception as e: - LOG.exception("List all detailed volumes from cinder failed.") - raise exception.ListProtectableResourceFailed( - type=self._SUPPORT_RESOURCE_TYPE, - reason=six.text_type(e)) - else: - return [resource.Resource( - type=self._SUPPORT_RESOURCE_TYPE, id=vol.id, name=vol.name, - extra_info={'availability_zone': vol.availability_zone}) - for vol in volumes if _is_attached_to(vol)] - - def get_dependent_resources(self, context, parent_resource): - if parent_resource.type in (constants.SERVER_RESOURCE_TYPE, - constants.PROJECT_RESOURCE_TYPE): - return self._get_dependent_resources_by_server(context, - parent_resource) - - if parent_resource.type == constants.POD_RESOURCE_TYPE: - return self._get_dependent_resources_by_pod(context, - parent_resource) - - return [] diff --git a/karbor/services/protection/protectable_registry.py b/karbor/services/protection/protectable_registry.py deleted file mode 100644 index 166e93a6..00000000 --- a/karbor/services/protection/protectable_registry.py +++ /dev/null @@ -1,120 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from karbor.exception import ListProtectableResourceFailed -from karbor.services.protection.graph import build_graph - -from oslo_config import cfg -from oslo_log import log as logging -from stevedore import extension - -LOG = logging.getLogger(__name__) - - -def _warn_missing_protectable(extmanager, ep, err): - LOG.warning("Could not load %(name)s: %(error)s", - {'name': ep.name, 'error': err}) - - -class ProtectableRegistry(object): - - def __init__(self): - super(ProtectableRegistry, self).__init__() - self._protectable_map = {} - self._plugin_map = {} - - def load_plugins(self): - """Load all protectable plugins configured and register them.
- - """ - mgr = extension.ExtensionManager( - namespace='karbor.protectables', - invoke_on_load=True, - on_load_failure_callback=_warn_missing_protectable) - - for e in mgr: - self.register_plugin(e.obj) - - def register_plugin(self, plugin): - self._plugin_map[plugin.get_resource_type()] = plugin - - def _get_protectable(self, context, resource_type, conf=cfg.CONF): - if resource_type in self._protectable_map: - return self._protectable_map[resource_type] - - protectable = self._plugin_map[resource_type].instance( - context, conf) - self._protectable_map[resource_type] = protectable - return protectable - - def list_resource_types(self): - """List all resource types supported by protectables. - - :return: The list of supported resource types. - """ - return [type for type in self._plugin_map.keys()] - - def get_protectable_resource_plugin(self, resource_type): - """Get the protectable plugin with the specified type.""" - return self._plugin_map.get(resource_type) - - def list_resources(self, context, resource_type, parameters=None): - """List resource instances of given type. - - :param resource_type: The resource type to list instance. - :return: The list of resource instance. - """ - protectable = self._get_protectable(context, resource_type) - return protectable.list_resources(context, parameters=parameters) - - def show_resource(self, context, resource_type, resource_id, - parameters=None): - """List resource instances of given type. - - :param resource_type: The resource type of instance. - :param resource_id: The resource id of instance. - :return: The show of resource instance. - """ - protectable = self._get_protectable(context, resource_type) - return protectable.show_resource(context, resource_id, - parameters=parameters) - - def fetch_dependent_resources(self, context, resource): - """List dependent resources under given parent resource. - - :param resource: The parent resource to list dependent resources. - :return: The list of dependent resources. - """ - result = [] - for plugin in self._plugin_map.values(): - if resource.type in plugin.get_parent_resource_types(): - protectable = self._get_protectable( - context, - plugin.get_resource_type()) - try: - protectable_resources = \ - protectable.get_dependent_resources(context, resource) - except ListProtectableResourceFailed as e: - LOG.error("List resources failed, so skip it. " - "Error: {0}".format(e)) - protectable_resources = [] - result.extend(protectable_resources) - - return result - - def build_graph(self, context, resources): - def fetch_dependent_resources_context(resource): - return self.fetch_dependent_resources(context, resource) - - return build_graph( - start_nodes=resources, - get_child_nodes_func=fetch_dependent_resources_context, - ) diff --git a/karbor/services/protection/protection_plugin.py b/karbor/services/protection/protection_plugin.py deleted file mode 100644 index 075941ff..00000000 --- a/karbor/services/protection/protection_plugin.py +++ /dev/null @@ -1,153 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -class Operation(object): - def on_prepare_begin(self, checkpoint, resource, context, parameters, - **kwargs): - """on_prepare_begin hook runs before any child resource's hooks run - - Optional - :param checkpoint: checkpoint object for this operation - :param resource: a resource object for this operation - :param context: current operation context (viable for clients) - :param parameters: dictionary representing operation parameters - :param restore: Restore object for restore operation only - """ - pass - - def on_prepare_finish(self, checkpoint, resource, context, parameters, - **kwargs): - """on_prepare_finish hook runs after all child resources' prepare hooks - - Optional - :param checkpoint: checkpoint object for this operation - :param resource: a resource object for this operation - :param context: current operation context (viable for clients) - :param parameters: dictionary representing operation parameters - :param restore: Restore object for restore operation only - """ - pass - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - """on_main hook runs in parallel to other resources' on_main hooks - - Your main operation heavy lifting should probably be here. - Optional - :param checkpoint: checkpoint object for this operation - :param resource: a resource object for this operation - :param context: current operation context (viable for clients) - :param parameters: dictionary representing operation parameters - :param restore: Restore object for restore operation only - """ - pass - - def on_complete(self, checkpoint, resource, context, parameters, **kwargs): - """on_complete hook runs after all dependent resource's hooks - - Optional - :param checkpoint: checkpoint object for this operation - :param resource: a resource object for this operation - :param context: current operation context (viable for clients) - :param parameters: dictionary representing operation parameters - :param restore: Restore object for restore operation only - """ - pass - - -class ProtectionPlugin(object): - def __init__(self, config=None): - super(ProtectionPlugin, self).__init__() - self._config = config - - def get_protect_operation(self, resource): - """Returns the protect Operation for this resource - - :returns: Operation for the resource - """ - raise NotImplementedError - - def get_restore_operation(self, resource): - """Returns the restore Operation for this resource - - :returns: Operation for the resource - """ - raise NotImplementedError - - def get_verify_operation(self, resource): - """Returns the verify Operation for this resource - - :returns: Operation for the resource - """ - raise NotImplementedError - - def get_copy_operation(self, resource): - """Returns the copy Operation for this resource - - :returns: Operation for the resource - """ - raise NotImplementedError - - def get_delete_operation(self, resource): - """Returns the delete Operation for this resource - - :returns: Operation for the resource - """ - raise NotImplementedError - - @classmethod - def get_supported_resources_types(cls): - """Returns a list of resource types this plugin supports - - :returns: a list of resource types - """ - raise NotImplementedError - - @classmethod - def get_options_schema(cls, resource_type): - """Returns the protect options schema for a resource type - - :returns: a dictionary representing the schema - """ - raise NotImplementedError - - @classmethod - def 
get_saved_info_schema(cls, resource_type): - """Returns the saved info schema for a resource type - - :returns: a dictionary representing the schema - """ - raise NotImplementedError - - @classmethod - def get_restore_schema(cls, resource_type): - """Returns the restore schema for a resource type - - :returns: a dictionary representing the schema - """ - raise NotImplementedError - - @classmethod - def get_verify_schema(cls, resource_type): - """Returns the verify schema for a resource type - - :returns: a dictionary representing the schema - """ - raise NotImplementedError - - @classmethod - def get_saved_info(cls, metadata_store, resource): - """Returns the saved info for a resource - - :returns: a dictionary representing the saved info - """ - raise NotImplementedError diff --git a/karbor/services/protection/protection_plugins/__init__.py b/karbor/services/protection/protection_plugins/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/protection_plugins/database/__init__.py b/karbor/services/protection/protection_plugins/database/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/protection_plugins/database/database_backup_plugin.py b/karbor/services/protection/protection_plugins/database/database_backup_plugin.py deleted file mode 100644 index 11a16878..00000000 --- a/karbor/services/protection/protection_plugins/database/database_backup_plugin.py +++ /dev/null @@ -1,354 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from functools import partial -import six - -from karbor.common import constants -from karbor import exception -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protection_plugin -from karbor.services.protection.protection_plugins.database \ - import database_backup_plugin_schemas as database_instance_schemas -from karbor.services.protection.protection_plugins import utils -from oslo_config import cfg -from oslo_log import log as logging -from troveclient import exceptions as trove_exc - -LOG = logging.getLogger(__name__) - -trove_backup_opts = [ - cfg.IntOpt( - 'poll_interval', default=15, - help='Poll interval for Trove Database Instance status.' 
- ) -] - -DATABASE_FAILURE_STATUSES = {'BLOCKED', 'FAILED', 'REBOOT', - 'SHUTDOWN', 'ERROR', - 'RESTART_REQUIRED', 'EJECT', 'DETACH'} - -DATABASE_IGNORE_STATUSES = {'BUILD', 'RESIZE', 'BACKUP', 'PROMOTE', 'UPGRADE'} - - -def get_backup_status(trove_client, backup_id): - return get_resource_status(trove_client.backups, backup_id, - 'backup') - - -def get_database_instance_status(trove_client, instance_id): - return get_resource_status(trove_client.instances, instance_id, 'instance') - - -def get_resource_status(resource_manager, resource_id, resource_type): - LOG.debug('Polling %(resource_type)s (id: %(resource_id)s)', - {'resource_type': resource_type, 'resource_id': resource_id}) - try: - resource = resource_manager.get(resource_id) - status = resource.status - except trove_exc.NotFound: - status = 'not-found' - LOG.debug( - 'Polled %(resource_type)s (id: %(resource_id)s) status: %(status)s', - {'resource_type': resource_type, 'resource_id': resource_id, - 'status': status} - ) - return status - - -class ProtectOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(ProtectOperation, self).__init__() - self._interval = poll_interval - - def _create_backup(self, trove_client, instance_id, backup_name, - description): - backup = trove_client.backups.create( - backup_name, - instance=instance_id, - description=description - ) - - backup_id = backup.id - is_success = utils.status_poll( - partial(get_backup_status, trove_client, backup_id), - interval=self._interval, - success_statuses={'COMPLETED'}, - failure_statuses={'FAILED'}, - ignore_statuses={'BUILDING'}, - ignore_unexpected=True - ) - - if not is_success: - try: - backup = trove_client.backups.get(backup_id) - except Exception: - reason = 'Unable to find backup.' - else: - reason = 'The status of backup is %s' % backup.status - raise exception.CreateResourceFailed( - name="Database Instance Backup", - reason=reason, resource_id=instance_id, - resource_type=constants.DATABASE_RESOURCE_TYPE) - - return backup_id - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - instance_id = resource.id - bank_section = checkpoint.get_resource_bank_section(instance_id) - trove_client = ClientFactory.create_client('trove', context) - LOG.info('creating database instance backup, instance_id: %s', - instance_id) - bank_section.update_object('status', - constants.RESOURCE_STATUS_PROTECTING) - instance_info = trove_client.instances.get(instance_id) - if instance_info.status != "ACTIVE": - is_success = utils.status_poll( - partial(get_database_instance_status, trove_client, - instance_id), - interval=self._interval, success_statuses={'ACTIVE'}, - failure_statuses=DATABASE_FAILURE_STATUSES, - ignore_statuses=DATABASE_IGNORE_STATUSES, - ) - if not is_success: - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Database instance Backup", - reason='Database instance is in a error status.', - resource_id=instance_id, - resource_type=constants.DATABASE_RESOURCE_TYPE, - ) - resource_metadata = { - 'instance_id': instance_id, - 'datastore': instance_info.datastore, - 'flavor': instance_info.flavor, - 'size': instance_info.volume['size'], - } - backup_name = parameters.get('backup_name', 'backup%s' % ( - instance_id)) - description = parameters.get('description', None) - try: - backup_id = self._create_backup( - trove_client, instance_id, backup_name, description) - except exception.CreateResourceFailed as e: - LOG.error('Error creating backup 
(instance_id: %(instance_id)s ' - ': %(reason)s', {'instance_id': instance_id, - 'reason': e}) - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise - - resource_metadata['backup_id'] = backup_id - - bank_section.update_object('metadata', resource_metadata) - bank_section.update_object('status', - constants.RESOURCE_STATUS_AVAILABLE) - LOG.info('Backup database instance (instance_id: %(instance_id)s ' - 'backup_id: %(backup_id)s ) successfully', - {'instance_id': instance_id, 'backup_id': backup_id}) - - -class RestoreOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(RestoreOperation, self).__init__() - self._interval = poll_interval - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_instance_id = resource.id - bank_section = checkpoint.get_resource_bank_section( - original_instance_id) - trove_client = ClientFactory.create_client('trove', context) - resource_metadata = bank_section.get_object('metadata') - restore_name = parameters.get('restore_name', - '%s@%s' % (checkpoint.id, - original_instance_id)) - flavor = resource_metadata['flavor'] - size = resource_metadata['size'] - backup_id = resource_metadata['backup_id'] - restore = kwargs.get('restore') - LOG.info("Restoring a database instance from backup, " - "original_instance_id: %s.", original_instance_id) - - try: - instance_info = trove_client.instances.create( - restore_name, flavor["id"], volume={"size": size}, - restorePoint={"backupRef": backup_id}) - is_success = utils.status_poll( - partial(get_database_instance_status, trove_client, - instance_info.id), - interval=self._interval, success_statuses={'ACTIVE'}, - failure_statuses=DATABASE_FAILURE_STATUSES, - ignore_statuses=DATABASE_IGNORE_STATUSES - ) - if is_success is not True: - LOG.error('The status of database instance is ' - 'invalid. 
status:%s', instance_info.status) - restore.update_resource_status( - constants.DATABASE_RESOURCE_TYPE, - instance_info.id, instance_info.status, - "Invalid status.") - restore.save() - raise exception.RestoreResourceFailed( - name="Database instance Backup", - reason="Invalid status.", - resource_id=original_instance_id, - resource_type=constants.DATABASE_RESOURCE_TYPE) - restore.update_resource_status( - constants.DATABASE_RESOURCE_TYPE, - instance_info.id, instance_info.status) - restore.save() - except Exception as e: - LOG.error("Restore Database instance from backup " - "failed, instance_id: %s.", original_instance_id) - raise exception.RestoreResourceFailed( - name="Database instance Backup", - reason=e, resource_id=original_instance_id, - resource_type=constants.DATABASE_RESOURCE_TYPE) - LOG.info("Finish restoring a Database instance from backup," - "instance_id: %s.", original_instance_id) - - -class VerifyOperation(protection_plugin.Operation): - def __init__(self): - super(VerifyOperation, self).__init__() - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_instance_id = resource.id - bank_section = checkpoint.get_resource_bank_section( - original_instance_id) - trove_client = ClientFactory.create_client('trove', context) - resource_metadata = bank_section.get_object('metadata') - LOG.info('Verifying the database instance, instance_id: %s', - original_instance_id) - - update_method = partial( - utils.update_resource_verify_result, - kwargs.get('verify'), resource.type, original_instance_id) - - backup_id = resource_metadata['backup_id'] - try: - instance_backup = trove_client.backups.get(backup_id) - backup_status = instance_backup.status - except Exception as ex: - LOG.error('Getting database backup (backup_id: %(backup_id)s):' - '%(reason)s fails', - {'backup_id': backup_id, 'reason': ex}) - reason = 'Getting database backup fails.' - update_method(constants.RESOURCE_STATUS_ERROR, reason) - raise - - if backup_status == 'COMPLETED': - update_method(constants.RESOURCE_STATUS_AVAILABLE) - else: - reason = ('The status of database backup status is %s.' - % backup_status) - update_method(backup_status, reason) - raise exception.VerifyResourceFailed( - name="Database backup", - reason=reason, - resource_id=original_instance_id, - resource_type=resource.type) - - -class DeleteOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(DeleteOperation, self).__init__() - self._interval = poll_interval - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - backup_id = None - try: - bank_section.update_object('status', - constants.RESOURCE_STATUS_DELETING) - resource_metadata = bank_section.get_object('metadata') - backup_id = resource_metadata['backup_id'] - trove_client = ClientFactory.create_client('trove', context) - try: - backup = trove_client.backups.get(backup_id) - trove_client.backups.delete(backup) - except trove_exc.NotFound: - LOG.info('Backup id: %s not found. 
Assuming deleted', - backup_id) - is_success = utils.status_poll( - partial(get_backup_status, trove_client, backup_id), - interval=self._interval, - success_statuses={'not-found'}, - failure_statuses={'FAILED', 'DELETE_FAILED'}, - ignore_statuses={'COMPLETED'}, - ignore_unexpected=True - ) - if not is_success: - raise exception.NotFound() - bank_section.delete_object('metadata') - bank_section.update_object('status', - constants.RESOURCE_STATUS_DELETED) - except Exception as e: - LOG.error('Delete Database instance Backup failed, backup_id: %s', - backup_id) - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.DeleteResourceFailed( - name="Database instance Backup", - reason=six.text_type(e), - resource_id=resource_id, - resource_type=constants.DATABASE_RESOURCE_TYPE - ) - - -class DatabaseBackupProtectionPlugin(protection_plugin.ProtectionPlugin): - _SUPPORT_RESOURCE_TYPES = [constants.DATABASE_RESOURCE_TYPE] - - def __init__(self, config=None): - super(DatabaseBackupProtectionPlugin, self).__init__(config) - self._config.register_opts(trove_backup_opts, - 'database_backup_plugin') - self._plugin_config = self._config.database_backup_plugin - self._poll_interval = self._plugin_config.poll_interval - - @classmethod - def get_supported_resources_types(cls): - return cls._SUPPORT_RESOURCE_TYPES - - @classmethod - def get_options_schema(cls, resources_type): - return database_instance_schemas.OPTIONS_SCHEMA - - @classmethod - def get_restore_schema(cls, resources_type): - return database_instance_schemas.RESTORE_SCHEMA - - @classmethod - def get_verify_schema(cls, resources_type): - return database_instance_schemas.VERIFY_SCHEMA - - @classmethod - def get_saved_info_schema(cls, resources_type): - return database_instance_schemas.SAVED_INFO_SCHEMA - - @classmethod - def get_saved_info(cls, metadata_store, resource): - pass - - def get_protect_operation(self, resource): - return ProtectOperation(self._poll_interval) - - def get_restore_operation(self, resource): - return RestoreOperation(self._poll_interval) - - def get_verify_operation(self, resource): - return VerifyOperation() - - def get_delete_operation(self, resource): - return DeleteOperation(self._poll_interval) diff --git a/karbor/services/protection/protection_plugins/database/database_backup_plugin_schemas.py b/karbor/services/protection/protection_plugins/database/database_backup_plugin_schemas.py deleted file mode 100644 index 4e177115..00000000 --- a/karbor/services/protection/protection_plugins/database/database_backup_plugin_schemas.py +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -OPTIONS_SCHEMA = { - "title": "Database Instance Protection Options", - "type": "object", - "properties": { - "backup_name": { - "type": "string", - "title": "Backup Name", - "description": "The name of the backup.", - "default": None - }, - "description": { - "type": "string", - "title": "Description", - "description": "The description of the backup." 
- } - }, - "required": ["backup_name"] -} - -RESTORE_SCHEMA = { - "title": "Database Instance Protection Restore", - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "title": "Restore Database Instance Name", - "description": "The name of the restore Database Instance", - "default": None - }, - }, - "required": ["restore_name"] -} - -VERIFY_SCHEMA = { - "title": "Database backup Verify", - "type": "object", - "properties": {} -} - -SAVED_INFO_SCHEMA = { - "title": "Database Instance Protection Saved Info", - "type": "object", - "properties": {}, - "required": [] -} diff --git a/karbor/services/protection/protection_plugins/image/__init__.py b/karbor/services/protection/protection_plugins/image/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/protection_plugins/image/image_plugin_schemas.py b/karbor/services/protection/protection_plugins/image/image_plugin_schemas.py deleted file mode 100644 index 70da2ef9..00000000 --- a/karbor/services/protection/protection_plugins/image/image_plugin_schemas.py +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -OPTIONS_SCHEMA = { - "title": "Image Protection Options", - "type": "object", - "properties": { - "backup_name": { - "type": "string", - "title": "Backup Name", - "description": "The name of the backup.", - "default": None - } - }, - "required": [] -} - -RESTORE_SCHEMA = { - "title": "Image Protection Restore", - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "title": "Restore Image Name", - "description": "The name of the restore image", - }, - }, - "required": ["restore_name"] -} - -VERIFY_SCHEMA = { - "title": "Image Protection Verify", - "type": "object", - "properties": {} -} - -# TODO(hurong) -SAVED_INFO_SCHEMA = { - "title": "Image Protection Saved Info", - "type": "object", - "properties": { - "image_metadata": { - "type": "object", - "title": "Image Metadata", - "description": "To save disk_format and container_format", - } - }, - "required": ["image_metadata"] -} diff --git a/karbor/services/protection/protection_plugins/image/image_protection_plugin.py b/karbor/services/protection/protection_plugins/image/image_protection_plugin.py deleted file mode 100644 index 5fb358cd..00000000 --- a/karbor/services/protection/protection_plugins/image/image_protection_plugin.py +++ /dev/null @@ -1,390 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
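These *_SCHEMA dicts are plain JSON Schema documents. This diff does not show how Karbor feeds them to a validator, but they can be exercised directly with the jsonschema package (an assumption for illustration; note that a "default" of None is advisory metadata and is never checked against "type"):

import jsonschema

OPTIONS_SCHEMA = {
    "title": "Image Protection Options",
    "type": "object",
    "properties": {
        "backup_name": {"type": "string"},
    },
    "required": [],
}

jsonschema.validate({"backup_name": "nightly"}, OPTIONS_SCHEMA)  # passes
try:
    jsonschema.validate({"backup_name": 42}, OPTIONS_SCHEMA)
except jsonschema.ValidationError as err:
    print(err.message)  # 42 is not of type 'string'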
- -from functools import partial - -from karbor.common import constants -from karbor import exception -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protection_plugin -from karbor.services.protection.protection_plugins.image \ - import image_plugin_schemas as image_schemas -from karbor.services.protection.protection_plugins import utils -from oslo_config import cfg -from oslo_log import log as logging - -image_backup_opts = [ - cfg.IntOpt('backup_image_object_size', - default=65536 * 10, - help='The size in bytes of instance image objects. ' - 'The value must be a multiple of 65536(' - 'the size of image\'s chunk).'), - cfg.IntOpt('poll_interval', default=10, - help='Poll interval for image status'), - cfg.BoolOpt('enable_server_snapshot', - default=True, - help='Enable server snapshot when server is ' - 'the parent resource of image') -] - -LOG = logging.getLogger(__name__) - - -def get_image_status(glance_client, image_id): - LOG.debug('Polling image (image_id: %s)', image_id) - try: - image = glance_client.images.get(image_id) - status = image.get('status') - except exception.NotFound: - status = 'not-found' - LOG.debug('Polled image (image_id: %s) status: %s', - image_id, status) - return status - - -def get_server_status(nova_client, server_id): - LOG.debug('Polling server (server_id: %s)', server_id) - try: - server = nova_client.servers.get(server_id) - status = server.status - except exception.NotFound: - status = 'not-found' - LOG.debug('Polled server (server_id: %s) status: %s', server_id, status) - return status - - -class ProtectOperation(protection_plugin.Operation): - def __init__(self, backup_image_object_size, - poll_interval, enable_server_snapshot): - super(ProtectOperation, self).__init__() - self._data_block_size_bytes = backup_image_object_size - self._interval = poll_interval - self._enable_server_snapshot = enable_server_snapshot - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - LOG.debug('Start creating image backup, resource info: %s', - resource.to_dict()) - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - glance_client = ClientFactory.create_client('glance', context) - bank_section.update_object("status", - constants.RESOURCE_STATUS_PROTECTING) - resource_definition = {'resource_id': resource_id} - if resource.extra_info and self._enable_server_snapshot: - image_id = self._create_server_snapshot(context, glance_client, - parameters, resource, - resource_definition, - resource_id) - need_delete_temp_image = True - else: - image_id = resource_id - need_delete_temp_image = False - - LOG.info("Creating image backup, image_id: %s.", image_id) - try: - image_info = glance_client.images.get(image_id) - if image_info.status != "active": - is_success = utils.status_poll( - partial(get_image_status, glance_client, image_info.id), - interval=self._interval, success_statuses={'active'}, - ignore_statuses={'queued', 'saving'}, - failure_statuses={'killed', 'deleted', 'pending_delete', - 'deactivated', 'NotFound'} - ) - if is_success is not True: - LOG.error("The status of image (id: %s) is invalid.", - image_id) - raise exception.CreateResourceFailed( - name="Image Backup", - reason="The status of image is invalid.", - resource_id=image_id, - resource_type=constants.IMAGE_RESOURCE_TYPE) - image_metadata = { - "disk_format": image_info.disk_format, - "container_format": image_info.container_format, - "checksum": image_info.checksum, - } - 
resource_definition["image_metadata"] = image_metadata - bank_section.update_object("metadata", resource_definition) - except Exception as err: - LOG.error("Create image backup failed, image_id: %s.", image_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Image Backup", - reason=err, - resource_id=image_id, - resource_type=constants.IMAGE_RESOURCE_TYPE) - self._create_backup(glance_client, bank_section, image_id) - if need_delete_temp_image: - try: - glance_client.images.delete(image_id) - except Exception as error: - LOG.warning('Failed to delete temporary image: %s', error) - else: - LOG.debug('Delete temporary image(%s) success', image_id) - - def _create_server_snapshot(self, context, glance_client, parameters, - resource, resource_definition, resource_id): - server_id = resource.extra_info.get('server_id') - resource_definition['resource_id'] = resource_id - resource_definition['server_id'] = server_id - nova_client = ClientFactory.create_client('nova', context) - is_success = utils.status_poll( - partial(get_server_status, nova_client, server_id), - interval=self._interval, - success_statuses={'ACTIVE', 'STOPPED', 'SUSPENDED', - 'PAUSED'}, - failure_statuses={'DELETED', 'ERROR', 'RESIZED', 'SHELVED', - 'SHELVED_OFFLOADED', 'SOFT_DELETED', - 'RESCUED', 'not-found'}, - ignore_statuses={'BUILDING'}, - ) - if not is_success: - raise exception.CreateResourceFailed( - name="Image Backup", - reason='The parent server of the image is not in valid' - ' status', - resource_id=resource_id, - resource_type=constants.IMAGE_RESOURCE_TYPE) - temp_image_name = 'Temp_image_name_for_karbor' + server_id - try: - image_uuid = nova_client.servers.create_image( - server_id, temp_image_name, parameters) - except Exception as e: - msg = "Failed to create the server snapshot: %s" % e - LOG.exception(msg) - raise exception.CreateResourceFailed( - name="Image Backup", - reason=msg, - resource_id=resource_id, - resource_type=constants.IMAGE_RESOURCE_TYPE - ) - else: - is_success = utils.status_poll( - partial(get_image_status, glance_client, image_uuid), - interval=self._interval, - success_statuses={'active'}, - failure_statuses={'killed', 'deleted', 'pending_delete', - 'deactivated'}, - ignore_statuses={'queued', 'saving', 'uploading'}) - if not is_success: - msg = "Image has been created, but fail to become " \ - "active, so delete it and raise exception." 
- LOG.error(msg) - glance_client.images.delete(image_uuid) - image_uuid = None - if not image_uuid: - raise exception.CreateResourceFailed( - name="Image Backup", - reason="Create parent server snapshot failed.", - resource_id=resource_id, - resource_type=constants.IMAGE_RESOURCE_TYPE) - return image_uuid - - def _create_backup(self, glance_client, bank_section, image_id): - try: - chunks_num = utils.backup_image_to_bank( - glance_client, - image_id, bank_section, - self._data_block_size_bytes - ) - - # Save the chunks_num to metadata - resource_definition = bank_section.get_object("metadata") - if resource_definition is not None: - resource_definition["chunks_num"] = chunks_num - bank_section.update_object("metadata", resource_definition) - - # Update resource_definition backup_status - bank_section.update_object("status", - constants.RESOURCE_STATUS_AVAILABLE) - LOG.info('Protecting image (id: %s) to bank completed ' - 'successfully', image_id) - except Exception as err: - # update resource_definition backup_status - LOG.exception('Protecting image (id: %s) to bank failed.', - image_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Image Backup", - reason=err, - resource_id=image_id, - resource_type=constants.IMAGE_RESOURCE_TYPE) - - -class DeleteOperation(protection_plugin.Operation): - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - image_id = resource.id - bank_section = checkpoint.get_resource_bank_section(image_id) - - LOG.info("Deleting image backup, image_id: %s.", image_id) - try: - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETING) - objects = bank_section.list_objects() - for obj in objects: - if obj == "status": - continue - bank_section.delete_object(obj) - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETED) - except Exception as err: - LOG.error("delete image backup failed, image_id: %s.", image_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.DeleteResourceFailed( - name="Image Backup", - reason=err, - resource_id=image_id, - resource_type=constants.IMAGE_RESOURCE_TYPE) - - -class RestoreOperation(protection_plugin.Operation): - def __init__(self, poll_interval, enable_server_snapshot): - super(RestoreOperation, self).__init__() - self._interval = poll_interval - self._enable_server_snapshot = enable_server_snapshot - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_image_id = resource.id - name = parameters.get("restore_name", "karbor-restore-image") - LOG.info("Restoring image backup, image_id: %s.", original_image_id) - - glance_client = ClientFactory.create_client('glance', context) - bank_section = checkpoint.get_resource_bank_section(original_image_id) - image_info = None - try: - image_info = utils.restore_image_from_bank( - glance_client, bank_section, name) - - if image_info.status != "active": - is_success = utils.status_poll( - partial(get_image_status, glance_client, image_info.id), - interval=self._interval, success_statuses={'active'}, - ignore_statuses={'queued', 'saving'}, - failure_statuses={'killed', 'deleted', 'pending_delete', - 'deactivated', 'not-found'} - ) - if is_success is not True: - LOG.error('The status of image is invalid. 
status: %s', - image_info.status) - raise exception.RestoreResourceFailed( - name="Image Backup", - resource_id=image_info.id, - resource_type=constants.IMAGE_RESOURCE_TYPE) - - kwargs.get("new_resources")[original_image_id] = image_info.id - except Exception as e: - LOG.error("Restore image backup failed, image_id: %s.", - original_image_id) - if image_info is not None and hasattr(image_info, 'id'): - LOG.info("Delete the failed image, image_id: %s.", - image_info.id) - glance_client.images.delete(image_info.id) - raise exception.RestoreResourceFailed( - name="Image Backup", - reason=e, resource_id=original_image_id, - resource_type=constants.IMAGE_RESOURCE_TYPE) - LOG.info("Finished restoring image backup, image_id: %s.", - original_image_id) - - -class VerifyOperation(protection_plugin.Operation): - def __init__(self): - super(VerifyOperation, self).__init__() - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_image_id = resource.id - bank_section = checkpoint.get_resource_bank_section( - original_image_id) - LOG.info('Verifying the image backup, image_id: %s', - original_image_id) - - update_method = partial( - utils.update_resource_verify_result, - kwargs.get('verify'), resource.type, original_image_id) - - backup_status = bank_section.get_object("status") - - if backup_status == constants.RESOURCE_STATUS_AVAILABLE: - update_method(constants.RESOURCE_STATUS_AVAILABLE) - else: - reason = ('The status of the image backup is %s.' - % backup_status) - update_method(backup_status, reason) - raise exception.VerifyResourceFailed( - name="Image backup", - reason=reason, - resource_id=original_image_id, - resource_type=resource.type) - - -class GlanceProtectionPlugin(protection_plugin.ProtectionPlugin): - _SUPPORT_RESOURCE_TYPES = [constants.IMAGE_RESOURCE_TYPE] - - def __init__(self, config=None): - super(GlanceProtectionPlugin, self).__init__(config) - self._config.register_opts(image_backup_opts, - 'image_backup_plugin') - self._plugin_config = self._config.image_backup_plugin - self._data_block_size_bytes = ( - self._plugin_config.backup_image_object_size) - self._poll_interval = self._plugin_config.poll_interval - self._enable_server_snapshot = ( - self._plugin_config.enable_server_snapshot) - - if self._data_block_size_bytes % 65536 != 0 or ( - self._data_block_size_bytes <= 0): - raise exception.InvalidParameterValue( - err="The value of CONF.backup_image_object_size " - "is invalid!") - - @classmethod - def get_supported_resources_types(cls): - return cls._SUPPORT_RESOURCE_TYPES - - @classmethod - def get_options_schema(cls, resources_type): - return image_schemas.OPTIONS_SCHEMA - - @classmethod - def get_restore_schema(cls, resources_type): - return image_schemas.RESTORE_SCHEMA - - @classmethod - def get_verify_schema(cls, resources_type): - return image_schemas.VERIFY_SCHEMA - - @classmethod - def get_saved_info_schema(cls, resources_type): - return image_schemas.SAVED_INFO_SCHEMA - - @classmethod - def get_saved_info(cls, metadata_store, resource): - pass - - def get_protect_operation(self, resource): - return ProtectOperation(self._data_block_size_bytes, - self._poll_interval, - self._enable_server_snapshot) - - def get_restore_operation(self, resource): - return RestoreOperation(self._poll_interval, - self._enable_server_snapshot) - - def get_verify_operation(self, resource): - return VerifyOperation() - - def get_delete_operation(self, resource): - return DeleteOperation() diff --git 
a/karbor/services/protection/protection_plugins/network/__init__.py b/karbor/services/protection/protection_plugins/network/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/protection_plugins/network/network_plugin_schemas.py b/karbor/services/protection/protection_plugins/network/network_plugin_schemas.py deleted file mode 100644 index 745ec908..00000000 --- a/karbor/services/protection/protection_plugins/network/network_plugin_schemas.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -OPTIONS_SCHEMA = { - "title": "Network Protection Options", - "type": "object", - "properties": {}, - "required": [] -} - -RESTORE_SCHEMA = { - "title": "Network Protection Restore", - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "title": "Restore Network Name", - "description": "The name of the restore network", - }, - }, - "required": ["restore_name"] -} - -VERIFY_SCHEMA = { - "title": "Network Protection Verify", - "type": "object", - "properties": {} -} - -SAVED_INFO_SCHEMA = { - "title": "Network Protection Saved Info", - "type": "object", - "properties": {}, - "required": [] -} diff --git a/karbor/services/protection/protection_plugins/network/neutron_protection_plugin.py b/karbor/services/protection/protection_plugins/network/neutron_protection_plugin.py deleted file mode 100644 index 8b05f9b2..00000000 --- a/karbor/services/protection/protection_plugins/network/neutron_protection_plugin.py +++ /dev/null @@ -1,741 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
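Note: the plugins in this diff gate every create, snapshot, and restore step on karbor.services.protection.protection_plugins.utils.status_poll. That helper lives in utils.py, which this diff does not show; the following is a minimal sketch of the contract its call sites assume (signature and boolean result inferred from usage, unbounded where the real helper may cap retries), not the actual implementation:

    import time

    def status_poll(get_status_func, interval, success_statuses,
                    failure_statuses, ignore_statuses):
        # Keep polling while the reported status is transitional; report
        # success only if a status in success_statuses is reached. Any
        # failure or unknown status yields False.
        status = get_status_func()
        while status in ignore_statuses:
            time.sleep(interval)
            status = get_status_func()
        return status in success_statuses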
-import copy -from functools import partial -from neutronclient.common import exceptions -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from karbor.common import constants -from karbor import exception -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protection_plugin -from karbor.services.protection.protection_plugins.network \ - import network_plugin_schemas -from karbor.services.protection.protection_plugins import utils - -LOG = logging.getLogger(__name__) - - -neutron_backup_opts = [ - cfg.IntOpt( - 'poll_interval', default=15, - help='Poll interval for Neutron backup status' - ), -] - - -def get_network_id(cntxt): - network_id = cntxt.project_id - return network_id - - -class ProtectOperation(protection_plugin.Operation): - _SUPPORT_RESOURCE_TYPES = [constants.NETWORK_RESOURCE_TYPE] - - def _get_resources_by_network(self, cntxt, neutron_client): - try: - networks = neutron_client.list_networks( - project_id=cntxt.project_id).get('networks') - networks_metadata = {} - - allowed_keys = [ - 'id', - 'admin_state_up', - 'availability_zone_hints', - 'description', - 'ipv4_address_scope', - 'ipv6_address_scope', - 'mtu', - 'name', - 'port_security_enabled', - 'router:external', - 'shared', - 'status', - 'subnets', - 'tags', - 'tenant_id' - ] - - for network in networks: - network_metadata = { - k: network[k] for k in network if k in allowed_keys} - networks_metadata[network["id"]] = network_metadata - return networks_metadata - except Exception as e: - LOG.exception("List all summary networks from neutron failed.") - raise exception.GetProtectionNetworkSubResourceFailed( - type=self._SUPPORT_RESOURCE_TYPES, - reason=six.text_type(e)) - - def _get_resources_by_subnet(self, cntxt, neutron_client): - try: - subnets = neutron_client.list_subnets( - project_id=cntxt.project_id).get('subnets') - subnets_metadata = {} - - allowed_keys = [ - 'cidr', - 'allocation_pools', - 'description', - 'dns_nameservers', - 'enable_dhcp', - 'gateway_ip', - 'host_routes', - 'id', - 'ip_version', - 'ipv6_address_mode', - 'ipv6_ra_mode', - 'name', - 'network_id', - 'subnetpool_id', - 'tenant_id' - ] - - for subnet in subnets: - subnet_metadata = { - k: subnet[k] for k in subnet if k in allowed_keys} - subnets_metadata[subnet["id"]] = subnet_metadata - - return subnets_metadata - except Exception as e: - LOG.exception("List all summary subnets from neutron failed.") - raise exception.GetProtectionNetworkSubResourceFailed( - type=self._SUPPORT_RESOURCE_TYPES, - reason=six.text_type(e)) - - def _get_resources_by_port(self, cntxt, neutron_client): - try: - ports = neutron_client.list_ports( - project_id=cntxt.project_id).get('ports') - ports_metadata = {} - - allowed_keys = [ - 'admin_state_up', - 'allowed_address_pairs', - 'description', - 'device_id', - 'device_owner', - 'extra_dhcp_opts', - 'fixed_ips', - 'id', - 'mac_address', - 'name', - 'network_id', - 'port_security_enabled', - 'security_groups', - 'status', - 'tenant_id' - ] - - for port in ports: - port_metadata = { - k: port[k] for k in port if k in allowed_keys} - ports_metadata[port["id"]] = port_metadata - return ports_metadata - except Exception as e: - LOG.exception("List all summary ports from neutron failed.") - raise exception.GetProtectionNetworkSubResourceFailed( - type=self._SUPPORT_RESOURCE_TYPES, - reason=six.text_type(e)) - - def _get_resources_by_router(self, cntxt, neutron_client): - try: - routers = 
neutron_client.list_routers( - project_id=cntxt.project_id).get('routers') - routers_metadata = {} - - allowed_keys = [ - 'admin_state_up', - 'availability_zone_hints', - 'description', - 'external_gateway_info', - 'id', - 'name', - 'routes', - 'status' - ] - - for router in routers: - router_metadata = { - k: router[k] for k in router if k in allowed_keys} - routers_metadata[router["id"]] = router_metadata - - return routers_metadata - except Exception as e: - LOG.exception("List all summary routers from neutron failed.") - raise exception.GetProtectionNetworkSubResourceFailed( - type=self._SUPPORT_RESOURCE_TYPES, - reason=six.text_type(e)) - - def _get_resources_by_security_group(self, cntxt, neutron_client): - try: - sgs = neutron_client.list_security_groups( - project_id=cntxt.project_id).get('security_groups') - sgs_metadata = {} - - allowed_keys = [ - 'id', - 'description', - 'name', - 'security_group_rules', - 'tenant_id' - ] - - for sg in sgs: - sg_metadata = {k: sg[k] for k in sg if k in allowed_keys} - sgs_metadata[sg["id"]] = sg_metadata - return sgs_metadata - except Exception as e: - LOG.exception("List all summary security_groups from neutron " - "failed.") - raise exception.GetProtectionNetworkSubResourceFailed( - type=self._SUPPORT_RESOURCE_TYPES, - reason=six.text_type(e)) - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - network_id = get_network_id(context) - backup_name = kwargs.get("backup_name", "karbor network backup") - bank_section = checkpoint.get_resource_bank_section(network_id) - neutron_client = ClientFactory.create_client("neutron", context) - - resource_definition = {"resource_id": network_id} - resource_definition["backup_name"] = backup_name - resource_definition["network_metadata"] = ( - self._get_resources_by_network(context, neutron_client)) - resource_definition["subnet_metadata"] = ( - self._get_resources_by_subnet(context, neutron_client)) - resource_definition["port_metadata"] = ( - self._get_resources_by_port(context, neutron_client)) - resource_definition["router_metadata"] = ( - self._get_resources_by_router(context, neutron_client)) - resource_definition["security-group_metadata"] = ( - self._get_resources_by_security_group(context, neutron_client)) - - try: - bank_section.update_object("status", - constants.RESOURCE_STATUS_PROTECTING) - - # write resource_definition in bank - bank_section.update_object("metadata", resource_definition) - - # update resource_definition backup_status - bank_section.update_object("status", - constants.RESOURCE_STATUS_AVAILABLE) - LOG.info("Finished backing up network, network_id: %s.", network_id) - except Exception as err: - # update resource_definition backup_status - LOG.error("Create backup failed, network_id: %s.", network_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Network Backup", - reason=err, - resource_id=network_id, - resource_type=self._SUPPORT_RESOURCE_TYPES) - - -class VerifyOperation(protection_plugin.Operation): - def __init__(self): - super(VerifyOperation, self).__init__() - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - network_id = get_network_id(context) - bank_section = checkpoint.get_resource_bank_section( - network_id) - LOG.info('Verifying the network backup, network_id: %s.', - network_id) - - update_method = partial( - utils.update_resource_verify_result, - kwargs.get('verify'), resource.type, network_id) - - backup_status = 
bank_section.get_object("status") - - if backup_status == constants.RESOURCE_STATUS_AVAILABLE: - update_method(constants.RESOURCE_STATUS_AVAILABLE) - else: - reason = ('The status of network backup status is %s.' - % backup_status) - update_method(backup_status, reason) - raise exception.VerifyResourceFailed( - name="Network backup", - reason=reason, - resource_id=network_id, - resource_type=resource.type) - - -class RestoreOperation(protection_plugin.Operation): - - def __init__(self, poll_interval): - super(RestoreOperation, self).__init__() - self._interval = poll_interval - - def _check_complete(self, neutron_client, resources, resource_type): - - success_statuses = ('ACTIVE', 'DOWN') - failure_statuses = ('ERROR', 'DEGRADED') - ignore_statuses = ('BUILD') - - get_resource_func = getattr(neutron_client, "show_%s" % resource_type) - - def _get_resource_status(resource_id): - return get_resource_func(resource_id)[resource_type]['status'] - - def _get_all_resource_status(): - recheck_resources = set() - - for r in resources: - status = _get_resource_status(r) - if status in success_statuses: - continue - elif status in failure_statuses: - return status - elif status in ignore_statuses: - recheck_resources.add(r) - else: - return status - - if recheck_resources: - resources.difference_update(recheck_resources) - return ignore_statuses[0] - return success_statuses[0] - - return utils.status_poll(_get_all_resource_status, self._interval, - success_statuses, failure_statuses, - ignore_statuses) - - def _restore_networks(self, neutron_client, new_resources, nets_meta): - net_ids = set() - for _, net_data in nets_meta.items(): - if net_data["router:external"]: - continue - - props = { - "admin_state_up": net_data["admin_state_up"], - "port_security_enabled": net_data["port_security_enabled"], - "shared": net_data["shared"], - "name": net_data["name"] - } - net_id = neutron_client.create_network( - {'network': props})['network']['id'] - new_resources[net_data["name"]] = net_id - net_ids.add(net_id) - - is_success = self._check_complete(neutron_client, net_ids, 'network') - if not is_success: - raise Exception("Create networks failed") - - def _restore_subnets(self, neutron_client, new_resources, - nets_meta, subs_meta): - for _, sub_data in subs_meta.items(): - props = { - "cidr": sub_data["cidr"], - "allocation_pools": sub_data["allocation_pools"], - "dns_nameservers": sub_data["dns_nameservers"], - "enable_dhcp": sub_data["enable_dhcp"], - "gateway_ip": sub_data["gateway_ip"] if ( - sub_data["gateway_ip"] != '') else None, - "host_routes": sub_data["host_routes"], - "name": sub_data["name"], - "ip_version": sub_data["ip_version"], - "network_id": new_resources.get( - nets_meta[sub_data['network_id']]['name']), - "tenant_id": sub_data["tenant_id"], - } - - subnet_id = neutron_client.create_subnet( - {'subnet': props})['subnet']['id'] - new_resources[sub_data["name"]] = subnet_id - - def _get_new_fixed_ips(self, new_resources, subs_meta, fixed_ips_meta): - new_fixed_ips = [] - for fixed_ip in fixed_ips_meta: - subnet = subs_meta.get(fixed_ip["subnet_id"]) - if not subnet: - continue - - props = { - "ip_address": fixed_ip["ip_address"], - "subnet_id": new_resources.get( - subnet['name']) - } - new_fixed_ips.append(props) - - return new_fixed_ips - - def _restore_ports(self, neutron_client, new_resources, - nets_meta, subs_meta, ports_meta): - port_ids = set() - for _, port_data in ports_meta.items(): - if port_data["device_owner"] in ( - "network:router_interface", "network:router_gateway", - 
"network:dhcp", "network:floatingip"): - continue - - props = { - "admin_state_up": port_data["admin_state_up"], - "device_id": port_data["device_id"], - "device_owner": port_data["device_owner"], - "mac_address": port_data["mac_address"], - "name": port_data["name"], - "network_id": new_resources.get( - nets_meta[port_data['network_id']]['name']), - "port_security_enabled": port_data["port_security_enabled"], - } - new_fixed_ips = self._get_new_fixed_ips( - new_resources, subs_meta, port_data["fixed_ips"]) - if new_fixed_ips: - props["fixed_ips"] = new_fixed_ips - - address_pairs = port_data["allowed_address_pairs"] - if address_pairs: - address_pairs = copy.deepcopy(address_pairs) - for pair in address_pairs: - if pair.get("mac_address") is None: - pair.pop("mac_address", None) - props["allowed_address_pairs"] = address_pairs - else: - props["allowed_address_pairs"] = [] - - security_groups = port_data["security_groups"] - if security_groups: - props['security_groups'] = [ - sg - for sg in security_groups - if new_resources.get(sg) != 'default' - ] - - port_id = neutron_client.create_port({'port': props})['port']['id'] - new_resources[port_data["name"]] = port_id - port_ids.add(port_id) - - is_success = self._check_complete(neutron_client, port_ids, 'port') - if not is_success: - raise Exception("Create port failed") - - def _get_new_external_gateway(self, public_network_id, gateway_info, - neutron_client): - # get public network id - if not public_network_id: - networks = neutron_client.list_networks().get('networks') - for network in networks: - if network['router:external'] is True: - public_network_id = network['id'] - break - else: - return - - gateway = {"network_id": public_network_id} - if gateway_info.get("enable_snat") is not None: - gateway["enable_snat"] = gateway_info["enable_snat"] - return gateway - - def _restore_routers(self, neutron_client, new_resources, - public_network_id, routers_meta): - router_ids = set() - for _, router_data in routers_meta.items(): - props = {"name": router_data["name"]} - # If creating router with 'external_gateway_info', then Neutron - # will refuse to to that, because this operation will need role - # of Admin, but the curent user should not be the role of that. - # So, it needs to be refactored here later. 
- # new_external_gateway = self._get_new_external_gateway( - # public_network_id, router_data["external_gateway_info"], - # neutron_client) - # if new_external_gateway: - # props["external_gateway_info"] = new_external_gateway - router_id = neutron_client.create_router( - {'router': props})['router']['id'] - new_resources[router_data["name"]] = router_id - router_ids.add(router_id) - - is_success = self._check_complete(neutron_client, router_ids, 'router') - if not is_success: - raise Exception("Create router failed") - - def _restore_routerinterfaces(self, neutron_client, new_resources, - subs_meta, routers_meta, ports_meta): - for _, port_data in ports_meta.items(): - if port_data["device_owner"] != "network:router_interface": - continue - - router = routers_meta.get(port_data["device_id"]) - if not router: - continue - - fixed_ips = port_data["fixed_ips"] - if not fixed_ips: - continue - subnet = subs_meta.get(fixed_ips[0]["subnet_id"]) - if not subnet: - continue - - neutron_client.add_interface_router( - new_resources.get(router['name']), - { - 'subnet_id': new_resources.get( - subnet['name']) - } - ) - - def _get_security_group_rules(self, security_group_rules): - new_security_group_rules = [] - for sg in security_group_rules: - if sg["remote_ip_prefix"] is None: - continue - - security_group_rule = { - "direction": sg["direction"], - "ethertype": sg["ethertype"], - "port_range_max": sg["port_range_max"], - "port_range_min": sg["port_range_min"], - "protocol": sg["protocol"], - "remote_group_id": sg["remote_group_id"], - "remote_ip_prefix": sg["remote_ip_prefix"], - } - if "remote_mode" in sg: - security_group_rule["remote_mode"] = sg["remote_mode"] - - new_security_group_rules.append(security_group_rule) - - return new_security_group_rules - - def _create_security_group_rules(self, neutron_client, rules, sg_id): - - @excutils.exception_filter - def _ignore_not_found(ex): - if isinstance(ex, (exceptions.NotFound, - exceptions.NetworkNotFoundClient, - exceptions.PortNotFoundClient)): - return True - return (isinstance(ex, exceptions.NeutronClientException) and - ex.status_code == 404) - - def _is_egress(rule): - return rule['direction'] == 'egress' - - def _delete_rules(): - try: - sec = neutron_client.show_security_group( - sg_id)['security_group'] - except Exception as ex: - _ignore_not_found(ex) - else: - for rule in sec['security_group_rules']: - if _is_egress(rule): - with _ignore_not_found: - neutron_client.delete_security_group_rule( - rule['id']) - - def _format_rule(rule): - rule['security_group_id'] = sg_id - if 'remote_mode' in rule: - remote_mode = rule.pop('remote_mode') - - if remote_mode == 'remote_group_id': - rule['remote_ip_prefix'] = None - if not rule.get('remote_group_id'): - rule['remote_group_id'] = sg_id - else: - rule['remote_group_id'] = None - - for key in ('port_range_min', 'port_range_max'): - if rule.get(key) is not None: - rule[key] = str(rule[key]) - - egress_deleted = False - for rule in rules: - if _is_egress(rule) and not egress_deleted: - # There is at least one egress rule, so delete the default - # rules which allow all egress traffic - egress_deleted = True - - _delete_rules() - - _format_rule(rule) - - try: - neutron_client.create_security_group_rule( - {'security_group_rule': rule}) - except Exception as ex: - if not isinstance(ex, exceptions.Conflict) or ( - isinstance(ex, exceptions.OverQuotaClient)): - raise - - def _restore_securitygroups(self, neutron_client, new_resources, sgs_meta): - for _, sg_data in sgs_meta.items(): - # Skip the 
default security groups - if sg_data["name"] == "default": - continue - - props = { - "name": sg_data["name"], - "description": sg_data["description"], - } - sg_id = neutron_client.create_security_group( - {'security_group': props})['security_group']['id'] - new_resources[sg_data["name"]] = sg_id - - rules = self._get_security_group_rules( - sg_data["security_group_rules"]) - self._create_security_group_rules(neutron_client, rules, sg_id) - - def on_main(self, checkpoint, resource, context, - parameters, **kwargs): - neutron_client = ClientFactory.create_client("neutron", context) - network_id = get_network_id(context) - public_network_id = parameters.get("public_network_id") - bank_section = checkpoint.get_resource_bank_section(network_id) - new_resources = kwargs['new_resources'] - - def _filter_resources(resources): - ids = [] - for obj_id, data in resources.items(): - network = nets_meta.get(data['network_id']) - if not network or network.get("router:external"): - ids.append(obj_id) - for obj_id in ids: - resources.pop(obj_id) - - try: - resource_definition = bank_section.get_object("metadata") - - # Config Net - nets_meta = resource_definition.get("network_metadata") - if nets_meta: - self._restore_networks(neutron_client, new_resources, - nets_meta) - - # Config Security-group - sgs_meta = resource_definition.get("security-group_metadata") - if sgs_meta: - self._restore_securitygroups(neutron_client, new_resources, - sgs_meta) - - # Config Subnet - subs_meta = resource_definition.get("subnet_metadata") - _filter_resources(subs_meta) - if subs_meta: - self._restore_subnets(neutron_client, new_resources, - nets_meta, subs_meta) - - # Config Router - routers_meta = resource_definition.get("router_metadata") - if routers_meta: - self._restore_routers(neutron_client, new_resources, - public_network_id, routers_meta) - - # Config Port - ports_meta = resource_definition.get("port_metadata") - _filter_resources(ports_meta) - if ports_meta: - self._restore_ports(neutron_client, new_resources, nets_meta, - subs_meta, ports_meta) - - # Config RouterInterface - if all([i is not None - for i in [subs_meta, routers_meta, ports_meta]]): - self._restore_routerinterfaces( - neutron_client, new_resources, - subs_meta, routers_meta, ports_meta) - - except Exception as e: - LOG.error("Restore network backup failed, network_id: %s.", - network_id) - raise exception.RestoreResourceFailed( - name="Network Backup", - reason=six.text_type(e), - resource_id=network_id, - resource_type=constants.NETWORK_RESOURCE_TYPE - ) - - -class DeleteOperation(protection_plugin.Operation): - def on_main(self, checkpoint, resource, cntxt, parameters, **kwargs): - network_id = get_network_id(cntxt) - bank_section = checkpoint.get_resource_bank_section(network_id) - - LOG.info("Deleting network backup, network_id: %s.", network_id) - - try: - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETING) - objects = bank_section.list_objects() - for obj in objects: - if obj == "status": - continue - bank_section.delete_object(obj) - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETED) - except Exception as err: - # update resource_definition backup_status - LOG.error("Delete backup failed, network_id: %s.", network_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.DeleteResourceFailed( - name="Network Backup", - reason=err, - resource_id=network_id, - resource_type=constants.NETWORK_RESOURCE_TYPE) - - -class 
NeutronProtectionPlugin(protection_plugin.ProtectionPlugin): - _SUPPORT_RESOURCE_TYPES = [constants.NETWORK_RESOURCE_TYPE] - - def __init__(self, config=None): - super(NeutronProtectionPlugin, self).__init__(config) - self._config.register_opts( - neutron_backup_opts, - 'neutron_backup_protection_plugin') - plugin_config = self._config.neutron_backup_protection_plugin - self._poll_interval = plugin_config.poll_interval - - @classmethod - def get_supported_resources_types(cls): - return cls._SUPPORT_RESOURCE_TYPES - - @classmethod - def get_options_schema(cls, resources_type): - return network_plugin_schemas.OPTIONS_SCHEMA - - @classmethod - def get_restore_schema(cls, resources_type): - return network_plugin_schemas.RESTORE_SCHEMA - - @classmethod - def get_verify_schema(cls, resources_type): - return network_plugin_schemas.VERIFY_SCHEMA - - @classmethod - def get_saved_info_schema(cls, resources_type): - return network_plugin_schemas.SAVED_INFO_SCHEMA - - @classmethod - def get_saved_info(cls, metadata_store, resource): - # TODO(chenhuayi) - pass - - def get_protect_operation(self, resource): - return ProtectOperation() - - def get_restore_operation(self, resource): - return RestoreOperation(self._poll_interval) - - def get_verify_operation(self, resource): - return VerifyOperation() - - def get_delete_operation(self, resource): - # TODO(chenhuayi) - pass diff --git a/karbor/services/protection/protection_plugins/noop_plugin.py b/karbor/services/protection/protection_plugins/noop_plugin.py deleted file mode 100644 index a31077af..00000000 --- a/karbor/services/protection/protection_plugins/noop_plugin.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
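Note: the no-op plugin below spells out the full Operation hook surface (on_prepare_begin, on_prepare_finish, on_main, on_complete) that karbor's protection engine invokes around each resource task. A hypothetical subclass sketch, mirroring the same pattern, where only on_main does real work; the engine that drives these hooks is not shown in this diff:

    from karbor.services.protection import protection_plugin

    class AuditOperation(protection_plugin.Operation):
        # Hypothetical example: implement every hook, as NoopOperation
        # below does, but log each resource visited in on_main.
        def on_prepare_begin(self, *args, **kwargs):
            pass

        def on_prepare_finish(self, *args, **kwargs):
            pass

        def on_main(self, checkpoint, resource, context, parameters,
                    **kwargs):
            print('visited resource %s (%s)' % (resource.id, resource.type))

        def on_complete(self, *args, **kwargs):
            pass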
- -from karbor.common import constants -from karbor.services.protection import protection_plugin - - -class NoopOperation(protection_plugin.Operation): - def on_prepare_begin(self, *args, **kwargs): - pass - - def on_prepare_finish(self, *args, **kwargs): - pass - - def on_main(self, *args, **kwargs): - pass - - def on_complete(self, *args, **kwargs): - pass - - -class NoopProtectionPlugin(protection_plugin.ProtectionPlugin): - def get_protect_operation(self, resource): - return NoopOperation() - - def get_restore_operation(self, resource): - return NoopOperation() - - def get_delete_operation(self, resource): - return NoopOperation() - - @classmethod - def get_supported_resources_types(cls): - return constants.RESOURCE_TYPES - - @classmethod - def get_options_schema(cls, resource_type): - return {} - - @classmethod - def get_saved_info_schema(cls, resource_type): - return {} - - @classmethod - def get_restore_schema(cls, resource_type): - return {} - - @classmethod - def get_saved_info(cls, metadata_store, resource): - return None diff --git a/karbor/services/protection/protection_plugins/pod/__init__.py b/karbor/services/protection/protection_plugins/pod/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/protection_plugins/pod/pod_plugin_schemas.py b/karbor/services/protection/protection_plugins/pod/pod_plugin_schemas.py deleted file mode 100644 index bae6e5b5..00000000 --- a/karbor/services/protection/protection_plugins/pod/pod_plugin_schemas.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -OPTIONS_SCHEMA = { - "title": "Pod Protection Options", - "type": "object", - "properties": {}, - "required": [] -} - -RESTORE_SCHEMA = { - "title": "Pod Protection Restore", - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "title": "Restore Pod Name", - "description": "The name of the restore pod", - }, - }, - "required": ["restore_name"] -} - -VERIFY_SCHEMA = { - "title": "Pod Protection Verify", - "type": "object", - "properties": {} -} - -SAVED_INFO_SCHEMA = { - "title": "Pod Protection Saved Info", - "type": "object", - "properties": {}, - "required": [] -} diff --git a/karbor/services/protection/protection_plugins/pod/pod_protection_plugin.py b/karbor/services/protection/protection_plugins/pod/pod_protection_plugin.py deleted file mode 100644 index e96e1361..00000000 --- a/karbor/services/protection/protection_plugins/pod/pod_protection_plugin.py +++ /dev/null @@ -1,366 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from functools import partial - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import uuidutils - -from karbor.common import constants -from karbor import exception -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protection_plugin -from karbor.services.protection.protection_plugins.pod \ - import pod_plugin_schemas -from karbor.services.protection.protection_plugins import utils - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -pod_backup_opts = [ - cfg.IntOpt( - 'poll_interval', default=15, - help='Poll interval for Pod backup status' - ), -] - - -class ProtectOperation(protection_plugin.Operation): - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - pod_id = resource.id - pod_name = resource.name - bank_section = checkpoint.get_resource_bank_section(pod_id) - k8s_client = ClientFactory.create_client("k8s", context) - resource_definition = {"resource_id": pod_id} - - LOG.info("Creating pod backup, id: %(pod_id)s, name: " - "%(pod_name)s.", {"pod_id": pod_id, "pod_name": pod_name}) - try: - bank_section.update_object("status", - constants.RESOURCE_STATUS_PROTECTING) - - # get metadata about pod - pod_namespace, k8s_pod_name = pod_name.split(":") - pod = k8s_client.read_namespaced_pod( - k8s_pod_name, pod_namespace) - resource_definition["resource_name"] = pod_name - resource_definition["namespace"] = pod_namespace - - mounted_volumes_list = self._get_mounted_volumes( - k8s_client, pod, pod_namespace) - containers_list = self._get_containers(pod) - - # save all pod's metadata - pod_metadata = { - 'apiVersion': pod.api_version, - 'kind': pod.kind, - 'metadata': { - 'labels': pod.metadata.labels, - 'name': pod.metadata.name, - 'namespace': pod.metadata.namespace, - }, - 'spec': { - 'containers': containers_list, - 'volumes': mounted_volumes_list, - 'restartPolicy': pod.spec.restart_policy - } - } - resource_definition["pod_metadata"] = pod_metadata - LOG.debug("Creating pod backup, pod_metadata: %s.", - pod_metadata) - bank_section.update_object("metadata", resource_definition) - bank_section.update_object("status", - constants.RESOURCE_STATUS_AVAILABLE) - LOG.info("Finished backing up pod, pod_id: %s.", pod_id) - except Exception as err: - LOG.exception("Create pod backup failed, pod_id: %s.", pod_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Pod Backup", - reason=err, - resource_id=pod_id, - resource_type=constants.POD_RESOURCE_TYPE) - - def _get_mounted_volumes(self, k8s_client, pod, pod_namespace): - mounted_volumes_list = [] - for volume in pod.spec.volumes: - volume_pvc = volume.persistent_volume_claim - volume_cinder = volume.cinder - volume_pvc_name = volume.name - - if volume_pvc: - pvc_name = volume_pvc.claim_name - pvc = k8s_client.read_namespaced_persistent_volume_claim( - pvc_name, pod_namespace) - pv_name = pvc.spec.volume_name - if pv_name: - pv = k8s_client.read_persistent_volume(pv_name) - if pv.spec.cinder: - pod_cinder_volume = { - 'name': volume_pvc_name, - 'cinder': { - "volumeID": pv.spec.cinder.volume_id, - "fsType": pv.spec.cinder.fs_type, - "readOnly": pv.spec.cinder.read_only - } - } - mounted_volumes_list.append(pod_cinder_volume) - elif volume_cinder: - pod_cinder_volume = { - 'name': volume_pvc_name, - 'cinder': { - "volumeID": volume_cinder.volume_id, - 
"fsType": volume_cinder.fs_type, - "readOnly": volume_cinder.read_only - } - } - mounted_volumes_list.append(pod_cinder_volume) - return mounted_volumes_list - - def _get_containers(self, pod): - containers_list = [] - for spec_container in pod.spec.containers: - resources = (spec_container.resources.to_dict() - if spec_container.resources else None) - volume_mounts_list = [] - if spec_container.volume_mounts: - for spec_volume_mount in spec_container.volume_mounts: - if 'serviceaccount' in spec_volume_mount.mount_path: - continue - volume_mount = { - 'name': spec_volume_mount.name, - 'mountPath': spec_volume_mount.mount_path, - 'readOnly': spec_volume_mount.read_only, - } - volume_mounts_list.append(volume_mount) - container = { - 'command': spec_container.command, - 'image': spec_container.image, - 'name': spec_container.name, - 'resources': resources, - 'volumeMounts': volume_mounts_list - } - containers_list.append(container) - return containers_list - - -class DeleteOperation(protection_plugin.Operation): - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - - LOG.info("Deleting pod backup, pod_id: %s.", resource_id) - - try: - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETING) - objects = bank_section.list_objects() - for obj in objects: - if obj == "status": - continue - bank_section.delete_object(obj) - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETED) - LOG.info("Finish delete pod, pod_id: %s.", resource_id) - except Exception as err: - LOG.error("Delete backup failed, pod_id: %s.", resource_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.DeleteResourceFailed( - name="Pod Backup", - reason=err, - resource_id=resource_id, - resource_type=constants.POD_RESOURCE_TYPE) - - -class VerifyOperation(protection_plugin.Operation): - def __init__(self): - super(VerifyOperation, self).__init__() - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_pod_id = resource.id - bank_section = checkpoint.get_resource_bank_section( - original_pod_id) - LOG.info('Verifying the pod backup, pod_id: %s.', original_pod_id) - - update_method = partial( - utils.update_resource_verify_result, - kwargs.get('verify'), resource.type, original_pod_id) - - backup_status = bank_section.get_object("status") - - if backup_status == constants.RESOURCE_STATUS_AVAILABLE: - update_method(constants.RESOURCE_STATUS_AVAILABLE) - else: - reason = ('The status of pod backup status is %s.' 
- % backup_status) - update_method(backup_status, reason) - raise exception.VerifyResourceFailed( - name="Pod backup", - reason=reason, - resource_id=original_pod_id, - resource_type=resource.type) - - -class RestoreOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(RestoreOperation, self).__init__() - self._interval = poll_interval - - def on_complete(self, checkpoint, resource, context, parameters, **kwargs): - original_pod_id = resource.id - LOG.info("Restoring pod backup, pod_id: %s.", original_pod_id) - - update_method = None - try: - resource_definition = checkpoint.get_resource_bank_section( - original_pod_id).get_object("metadata") - - LOG.debug("Restoring pod backup, metadata: %s.", - resource_definition) - - k8s_client = ClientFactory.create_client("k8s", context) - new_resources = kwargs.get("new_resources") - - # restore pod - new_pod_name = self._restore_pod_instance( - k8s_client, new_resources, original_pod_id, - parameters.get( - "restore_name", - "karbor-restored-pod-%s" % uuidutils.generate_uuid()), - resource_definition) - - update_method = partial(utils.update_resource_restore_result, - kwargs.get('restore'), resource.type, - new_pod_name) - update_method(constants.RESOURCE_STATUS_RESTORING) - pod_namespace = resource_definition["namespace"] - self._wait_pod_to_running(k8s_client, new_pod_name, - pod_namespace) - - new_resources[original_pod_id] = new_pod_name - update_method(constants.RESOURCE_STATUS_AVAILABLE) - LOG.info("Finished restoring pod, pod_id: %s.", - original_pod_id) - - except Exception as e: - if update_method: - update_method(constants.RESOURCE_STATUS_ERROR, str(e)) - LOG.exception("Restore pod backup failed, pod_id: %s.", - original_pod_id) - raise exception.RestoreResourceFailed( - name="Pod Backup", - reason=e, - resource_id=original_pod_id, - resource_type=constants.POD_RESOURCE_TYPE - ) - - def _restore_pod_instance(self, k8s_client, new_resources, - original_id, restore_name, - resource_definition): - pod_namespace = resource_definition["namespace"] - pod_metadata = resource_definition["pod_metadata"] - mounted_volumes_list = pod_metadata['spec'].get("volumes", None) - if mounted_volumes_list: - for mounted_volume in mounted_volumes_list: - cinder_volume = mounted_volume.get("cinder", None) - if cinder_volume: - original_volume_id = cinder_volume["volumeID"] - cinder_volume["volumeID"] = new_resources.get( - original_volume_id) - pod_metadata["metadata"]["name"] = restore_name - pod_manifest = pod_metadata - - LOG.debug("Restoring pod instance, pod_manifest: %s.", - pod_manifest) - try: - pod = k8s_client.create_namespaced_pod(body=pod_manifest, - namespace=pod_namespace) - except Exception as ex: - LOG.error('Error creating pod (pod_id:%(pod_id)s): ' - '%(reason)s', {'pod_id': original_id, 'reason': ex}) - raise - - return pod.metadata.name - - def _wait_pod_to_running(self, k8s_client, pod_name, pod_namespace): - def _get_pod_status(): - try: - pod = k8s_client.read_namespaced_pod(name=pod_name, - namespace=pod_namespace) - return pod.status.phase - except Exception as ex: - LOG.error('Fetch pod(%(pod_name)s) failed, ' - 'reason: %(reason)s', - {'pod_name': pod_name, - 'reason': ex}) - return 'ERROR' - - is_success = utils.status_poll( - _get_pod_status, - interval=self._interval, - success_statuses={'Running', }, - failure_statuses={'ERROR', 'Failed', 'Unknown'}, - ignore_statuses={'Pending'}, - ) - if not is_success: - raise Exception('The pod did not start successfully') - - -class 
PodProtectionPlugin(protection_plugin.ProtectionPlugin): - _SUPPORT_RESOURCE_TYPES = [constants.POD_RESOURCE_TYPE] - - def __init__(self, config=None): - super(PodProtectionPlugin, self).__init__(config) - self._config.register_opts(pod_backup_opts, - 'pod_backup_protection_plugin') - self._poll_interval = ( - self._config.pod_backup_protection_plugin.poll_interval) - - @classmethod - def get_supported_resources_types(cls): - return cls._SUPPORT_RESOURCE_TYPES - - @classmethod - def get_options_schema(cls, resource_type): - return pod_plugin_schemas.OPTIONS_SCHEMA - - @classmethod - def get_restore_schema(cls, resource_type): - return pod_plugin_schemas.RESTORE_SCHEMA - - @classmethod - def get_verify_schema(cls, resources_type): - return pod_plugin_schemas.VERIFY_SCHEMA - - @classmethod - def get_saved_info_schema(cls, resource_type): - return pod_plugin_schemas.SAVED_INFO_SCHEMA - - @classmethod - def get_saved_info(cls, metadata_store, resource): - pass - - def get_protect_operation(self, resource): - return ProtectOperation() - - def get_restore_operation(self, resource): - return RestoreOperation(self._poll_interval) - - def get_verify_operation(self, resource): - return VerifyOperation() - - def get_delete_operation(self, resource): - return DeleteOperation() diff --git a/karbor/services/protection/protection_plugins/server/__init__.py b/karbor/services/protection/protection_plugins/server/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/protection_plugins/server/nova_protection_plugin.py b/karbor/services/protection/protection_plugins/server/nova_protection_plugin.py deleted file mode 100644 index 723623aa..00000000 --- a/karbor/services/protection/protection_plugins/server/nova_protection_plugin.py +++ /dev/null @@ -1,483 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
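Note: for orientation, the protect path of the Nova plugin below serializes roughly the following structure into the bank section's "metadata" object. Field names are taken from the code that follows; the values here are illustrative placeholders only, not real data:

    resource_definition = {
        "resource_id": "<server uuid>",
        "attach_metadata": {
            # One entry per attached Cinder volume, keyed by volume id.
            "<volume uuid>": {"device": "/dev/vdb", "bootable": "false"},
        },
        "boot_metadata": {
            "boot_device_type": "image",  # or "volume" with boot_volume_id
            "boot_image_id": "<image uuid>",
        },
        "server_metadata": {
            "availability_zone": "nova",
            "networks": ["<network uuid>"],
            "floating_ips": ["203.0.113.10"],
            "flavor": "<flavor id>",
            "key_name": "mykey",
            "security_groups": [{"name": "default"}],
        },
    }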
- -from functools import partial - -from novaclient import exceptions -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import constants -from karbor import exception -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protection_plugin -from karbor.services.protection.protection_plugins.server \ - import server_plugin_schemas -from karbor.services.protection.protection_plugins import utils - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -VOLUME_ATTACHMENT_RESOURCE = 'OS::Cinder::VolumeAttachment' -FLOATING_IP_ASSOCIATION = 'OS::Nova::FloatingIPAssociation' - -nova_backup_opts = [ - cfg.IntOpt( - 'poll_interval', default=15, - help='Poll interval for Nova backup status' - ), -] - - -class ProtectOperation(protection_plugin.Operation): - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - server_id = resource.id - bank_section = checkpoint.get_resource_bank_section(server_id) - - nova_client = ClientFactory.create_client("nova", context) - cinder_client = ClientFactory.create_client("cinder", context) - neutron_client = ClientFactory.create_client("neutron", context) - - resource_definition = {"resource_id": server_id} - - # get dependent resources - server_child_nodes = [] - resources = checkpoint.resource_graph - for resource_node in resources: - resource = resource_node.value - if resource.id == server_id: - server_child_nodes = resource_node.child_nodes - - LOG.info("Creating server backup, server_id: %s. ", server_id) - try: - bank_section.update_object("status", - constants.RESOURCE_STATUS_PROTECTING) - - # get attach_metadata about volume - attach_metadata = {} - for server_child_node in server_child_nodes: - child_resource = server_child_node.value - if child_resource.type == constants.VOLUME_RESOURCE_TYPE: - volume = cinder_client.volumes.get(child_resource.id) - attachments = getattr(volume, "attachments") - for attachment in attachments: - if attachment["server_id"] == server_id: - attachment["bootable"] = getattr( - volume, "bootable") - attach_metadata[child_resource.id] = attachment - resource_definition["attach_metadata"] = attach_metadata - - # get metadata about AZ - server = nova_client.servers.get(server_id) - availability_zone = getattr(server, "OS-EXT-AZ:availability_zone") - - # get metadata about network, flavor, key_name, security_groups - addresses = getattr(server, "addresses") - networks = [] - floating_ips = [] - for network_infos in addresses.values(): - for network_info in network_infos: - addr = network_info.get("addr") - mac = network_info.get("OS-EXT-IPS-MAC:mac_addr") - network_type = network_info.get("OS-EXT-IPS:type") - if network_type == 'fixed': - port = neutron_client.list_ports( - mac_address=mac)["ports"][0] - if port["network_id"] not in networks: - networks.append(port["network_id"]) - elif network_type == "floating": - floating_ips.append(addr) - flavor = getattr(server, "flavor")["id"] - key_name = getattr(server, "key_name", None) - security_groups = getattr(server, "security_groups", None) - - # get metadata about boot device - boot_metadata = {} - image_info = getattr(server, "image", None) - if image_info is not None and isinstance(image_info, dict): - boot_metadata["boot_device_type"] = "image" - boot_metadata["boot_image_id"] = image_info['id'] - else: - boot_metadata["boot_device_type"] = "volume" - volumes_attached = getattr( - server, "os-extended-volumes:volumes_attached", []) - for volume_attached in 
volumes_attached: - volume_id = volume_attached["id"] - volume_attach_metadata = attach_metadata.get( - volume_id, None) - if volume_attach_metadata is not None and ( - volume_attach_metadata["bootable"] == "true"): - boot_metadata["boot_volume_id"] = volume_id - boot_metadata["boot_attach_metadata"] = ( - volume_attach_metadata) - resource_definition["boot_metadata"] = boot_metadata - - # save all server's metadata - server_metadata = {"availability_zone": availability_zone, - "networks": networks, - "floating_ips": floating_ips, - "flavor": flavor, - "key_name": key_name, - "security_groups": security_groups, - } - resource_definition["server_metadata"] = server_metadata - LOG.info("Creating server backup, resource_definition: %s.", - resource_definition) - bank_section.update_object("metadata", resource_definition) - - # update resource_definition backup_status - bank_section.update_object("status", - constants.RESOURCE_STATUS_AVAILABLE) - LOG.info("Finished backing up server, server_id: %s.", server_id) - except Exception as err: - # update resource_definition backup_status - LOG.exception("Create backup failed, server_id: %s.", server_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Server Backup", - reason=err, - resource_id=server_id, - resource_type=constants.SERVER_RESOURCE_TYPE) - - -class DeleteOperation(protection_plugin.Operation): - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - - LOG.info("Deleting server backup, server_id: %s.", resource_id) - - try: - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETING) - objects = bank_section.list_objects() - for obj in objects: - if obj == "status": - continue - bank_section.delete_object(obj) - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETED) - LOG.info("Finished deleting server backup, server_id: %s.", resource_id) - except Exception as err: - # update resource_definition backup_status - LOG.error("Delete backup failed, server_id: %s.", resource_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.DeleteResourceFailed( - name="Server Backup", - reason=err, - resource_id=resource_id, - resource_type=constants.SERVER_RESOURCE_TYPE) - - -class VerifyOperation(protection_plugin.Operation): - def __init__(self): - super(VerifyOperation, self).__init__() - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_server_id = resource.id - bank_section = checkpoint.get_resource_bank_section( - original_server_id) - LOG.info('Verifying the server backup, server_id: %s', - original_server_id) - - update_method = partial( - utils.update_resource_verify_result, - kwargs.get('verify'), resource.type, original_server_id) - - backup_status = bank_section.get_object("status") - - if backup_status == constants.RESOURCE_STATUS_AVAILABLE: - update_method(constants.RESOURCE_STATUS_AVAILABLE) - else: - reason = ('The status of the server backup is %s.' 
- % backup_status) - update_method(backup_status, reason) - raise exception.VerifyResourceFailed( - name="Server backup", - reason=reason, - resource_id=original_server_id, - resource_type=resource.type) - - -class RestoreOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(RestoreOperation, self).__init__() - self._interval = poll_interval - - def on_complete(self, checkpoint, resource, context, parameters, **kwargs): - original_server_id = resource.id - LOG.info("Restoring server backup, server_id: %s.", original_server_id) - - update_method = None - try: - resource_definition = checkpoint.get_resource_bank_section( - original_server_id).get_object("metadata") - - nova_client = ClientFactory.create_client("nova", context) - new_resources = kwargs.get("new_resources") - - # restore server instance - restore_net_id = parameters.get("restore_net_id", None) - restore_flavor_id = parameters.get("restore_flavor_id", None) - if restore_flavor_id: - resource_definition["server_metadata"]['flavor'] = ( - restore_flavor_id) - new_server_id = self._restore_server_instance( - nova_client, new_resources, original_server_id, - parameters.get("restore_name", "karbor-restore-server"), - restore_net_id, resource_definition) - - update_method = partial(utils.update_resource_restore_result, - kwargs.get('restore'), resource.type, - new_server_id) - update_method(constants.RESOURCE_STATUS_RESTORING) - self._wait_server_to_active(nova_client, new_server_id) - - # restore volume attachment - self._restore_volume_attachment( - nova_client, ClientFactory.create_client("cinder", context), - new_resources, new_server_id, resource_definition) - - # restore floating ip association - self._restore_floating_association( - nova_client, new_server_id, resource_definition) - - new_resources[original_server_id] = new_server_id - - update_method(constants.RESOURCE_STATUS_AVAILABLE) - - LOG.info("Finish restore server, server_id: %s.", - original_server_id) - - except Exception as e: - if update_method: - update_method(constants.RESOURCE_STATUS_ERROR, str(e)) - LOG.exception("Restore server backup failed, server_id: %s.", - original_server_id) - raise exception.RestoreResourceFailed( - name="Server Backup", - reason=e, - resource_id=original_server_id, - resource_type=constants.SERVER_RESOURCE_TYPE - ) - - def _restore_server_instance(self, nova_client, new_resources, - original_id, restore_name, restore_net_id, - resource_definition): - server_metadata = resource_definition["server_metadata"] - properties = { - "availability_zone": server_metadata.get("availability_zone"), - "flavor": server_metadata.get("flavor"), - "name": restore_name, - "image": None - } - - # server boot device - boot_metadata = resource_definition["boot_metadata"] - boot_device_type = boot_metadata.get("boot_device_type") - if boot_device_type == "image": - properties["image"] = new_resources.get( - boot_metadata["boot_image_id"]) - - elif boot_device_type == "volume": - properties["block_device_mapping_v2"] = [{ - 'uuid': new_resources.get( - boot_metadata["boot_volume_id"]), - 'source_type': 'volume', - 'destination_type': 'volume', - 'boot_index': 0, - 'delete_on_termination': False, - }] - else: - reason = "Can not find the boot device of the server." 
- LOG.error("Restore server backup failed, (server_id:" - "%(server_id)s): %(reason)s.", - {'server_id': original_id, - 'reason': reason}) - raise Exception(reason) - - # server key_name, security_groups, networks - properties["key_name"] = server_metadata.get("key_name", None) - - if server_metadata.get("security_groups"): - properties["security_groups"] = [ - security_group["name"] - for security_group in server_metadata["security_groups"] - ] - - if restore_net_id is not None: - properties["nics"] = [{'net-id': restore_net_id}] - elif server_metadata.get("networks"): - properties["nics"] = [ - {'net-id': network} - for network in server_metadata["networks"] - ] - - properties["userdata"] = None - - try: - server = nova_client.servers.create(**properties) - except Exception as ex: - LOG.error('Error creating server (server_id:%(server_id)s): ' - '%(reason)s', - {'server_id': original_id, - 'reason': ex}) - raise - - return server.id - - def _restore_volume_attachment(self, nova_client, cinder_client, - new_resources, new_server_id, - resource_definition): - attach_metadata = resource_definition.get("attach_metadata", {}) - for original_id, attach_metadata_item in attach_metadata.items(): - if attach_metadata_item.get("bootable", None) == "true": - continue - - volume_id = new_resources.get(original_id) - try: - nova_client.volumes.create_server_volume( - server_id=new_server_id, - volume_id=volume_id, - device=attach_metadata_item.get("device", None)) - - except Exception as ex: - LOG.error("Failed to attach volume %(vol)s to server %(srv)s, " - "reason: %(err)s", - {'vol': volume_id, - 'srv': new_server_id, - 'err': ex}) - raise - - self._wait_volume_to_attached(cinder_client, volume_id) - - def _restore_floating_association(self, nova_client, new_server_id, - resource_definition): - server_metadata = resource_definition["server_metadata"] - for floating_ip in server_metadata.get("floating_ips", []): - nova_client.servers.add_floating_ip( - nova_client.servers.get(new_server_id), floating_ip) - - def _wait_volume_to_attached(self, cinder_client, volume_id): - def _get_volume_status(): - try: - return cinder_client.volumes.get(volume_id).status - except Exception as ex: - LOG.error('Fetch volume(%(volume_id)s) status failed, ' - 'reason: %(reason)s', - {'volume_id': volume_id, - 'reason': ex}) - return 'ERROR' - - is_success = utils.status_poll( - _get_volume_status, - interval=self._interval, - success_statuses={'in-use', }, - failure_statuses={'ERROR', }, - ignore_statuses={'available', 'attaching'} - ) - if not is_success: - raise Exception('Attach the volume to server failed') - - def _wait_server_to_active(self, nova_client, server_id): - def _get_server_status(): - try: - server = self._fetch_server(nova_client, server_id) - return server.status.split('(')[0] if server else 'BUILD' - except Exception as ex: - LOG.error('Fetch server(%(server_id)s) failed, ' - 'reason: %(reason)s', - {'server_id': server_id, - 'reason': ex}) - return 'ERROR' - - is_success = utils.status_poll( - _get_server_status, - interval=self._interval, - success_statuses={'ACTIVE', }, - failure_statuses={'ERROR', }, - ignore_statuses={'BUILD', 'HARD_REBOOT', 'PASSWORD', 'REBOOT', - 'RESCUE', 'RESIZE', 'REVERT_RESIZE', 'SHUTOFF', - 'SUSPENDED', 'VERIFY_RESIZE'}, - ) - if not is_success: - raise Exception('The server does not start successfully') - - def _fetch_server(self, nova_client, server_id): - server = None - try: - server = nova_client.servers.get(server_id) - except exceptions.OverLimit as exc: - 
LOG.warning("Received an OverLimit response when " - "fetching server (%(id)s) : %(exception)s", - {'id': server_id, - 'exception': exc}) - except exceptions.ClientException as exc: - if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in - (500, 503))): - LOG.warning("Received the following exception when " - "fetching server (%(id)s) : %(exception)s", - {'id': server_id, - 'exception': exc}) - else: - raise - return server - - -class NovaProtectionPlugin(protection_plugin.ProtectionPlugin): - _SUPPORT_RESOURCE_TYPES = [constants.SERVER_RESOURCE_TYPE] - - def __init__(self, config=None): - super(NovaProtectionPlugin, self).__init__(config) - self._config.register_opts(nova_backup_opts, - 'nova_backup_protection_plugin') - self._poll_interval = ( - self._config.nova_backup_protection_plugin.poll_interval) - - @classmethod - def get_supported_resources_types(cls): - return cls._SUPPORT_RESOURCE_TYPES - - @classmethod - def get_options_schema(cls, resource_type): - return server_plugin_schemas.OPTIONS_SCHEMA - - @classmethod - def get_restore_schema(cls, resource_type): - return server_plugin_schemas.RESTORE_SCHEMA - - @classmethod - def get_verify_schema(cls, resources_type): - return server_plugin_schemas.VERIFY_SCHEMA - - @classmethod - def get_saved_info_schema(cls, resource_type): - return server_plugin_schemas.SAVED_INFO_SCHEMA - - @classmethod - def get_saved_info(cls, metadata_store, resource): - pass - - def get_protect_operation(self, resource): - return ProtectOperation() - - def get_restore_operation(self, resource): - return RestoreOperation(self._poll_interval) - - def get_verify_operation(self, resource): - return VerifyOperation() - - def get_delete_operation(self, resource): - return DeleteOperation() diff --git a/karbor/services/protection/protection_plugins/server/server_plugin_schemas.py b/karbor/services/protection/protection_plugins/server/server_plugin_schemas.py deleted file mode 100644 index 39b682c0..00000000 --- a/karbor/services/protection/protection_plugins/server/server_plugin_schemas.py +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
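The schemas module deleted next is what the plugin's `get_options_schema`/`get_restore_schema`/`get_verify_schema` classmethods above hand back to callers. As a hedged illustration (not part of this patch), a caller could validate user-supplied restore parameters against the plugin's restore schema with the `jsonschema` library; the schema here is abridged from the definitions that follow, and the parameter values are invented:

```python
# Hypothetical sketch: validating restore parameters against the server
# plugin's RESTORE_SCHEMA (defined below) before submitting a restore.
# Assumes the jsonschema library; the parameter values are made up.
import jsonschema

RESTORE_SCHEMA = {
    "title": "Server Protection Restore",
    "type": "object",
    "properties": {
        "restore_name": {"type": "string"},
        "restore_net_id": {"type": "string"},
        "restore_flavor_id": {"type": "string"},
    },
    "required": ["restore_name"],
}

parameters = {
    "restore_name": "karbor-restore-server",
    "restore_net_id": "11111111-2222-3333-4444-555555555555",  # made-up id
}

try:
    jsonschema.validate(parameters, RESTORE_SCHEMA)
except jsonschema.ValidationError as err:
    # A missing restore_name, for example, would be reported here.
    print("invalid restore parameters:", err.message)
```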
- -OPTIONS_SCHEMA = { - "title": "Server Protection Options", - "type": "object", - "properties": {}, - "required": [] -} - -RESTORE_SCHEMA = { - "title": "Server Protection Restore", - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "title": "Restore Server Name", - "description": "The name of the restore server", - }, - "restore_net_id": { - "type": "string", - "title": "Restore Server Net Id", - "description": "The net id of the restore server" - }, - "restore_flavor_id": { - "type": "string", - "title": "Restore Server Flavor Id", - "description": "The flavor id of the restore server" - } - }, - "required": ["restore_name"] -} - -VERIFY_SCHEMA = { - "title": "Server Protection Verify", - "type": "object", - "properties": {} -} - -# TODO(luobin) -SAVED_INFO_SCHEMA = { - "title": "Server Protection Saved Info", - "type": "object", - "properties": { - "attach_metadata": { - "type": "object", - "title": "Attached Volume Metadata", - "description": "The devices of attached volumes" - }, - "snapshot_metadata": { - "type": "object", - "title": "Snapshot Metadata", - "description": "The metadata of snapshot" - }, - }, - "required": ["attach_metadata", "snapshot_metadata"] -} diff --git a/karbor/services/protection/protection_plugins/share/__init__.py b/karbor/services/protection/protection_plugins/share/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/protection_plugins/share/share_snapshot_plugin.py b/karbor/services/protection/protection_plugins/share/share_snapshot_plugin.py deleted file mode 100644 index 5aa5ddac..00000000 --- a/karbor/services/protection/protection_plugins/share/share_snapshot_plugin.py +++ /dev/null @@ -1,354 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from functools import partial -import six - -from karbor.common import constants -from karbor import exception -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protection_plugin -from karbor.services.protection.protection_plugins.share \ - import share_snapshot_plugin_schemas as share_schemas -from karbor.services.protection.protection_plugins import utils -from manilaclient import exceptions as manila_exc -from oslo_config import cfg -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -manila_snapshot_opts = [ - cfg.IntOpt( - 'poll_interval', default=15, - help='Poll interval for Manila share status.' 
- ) -] - -SHARE_FAILURE_STATUSES = {'error', 'error_deleting', 'deleting', - 'not-found', 'extending_error', - 'shrinking_error', 'reverting_error'} - -SHARE_IGNORE_STATUSES = {'creating', 'reverting', 'extending', - 'shrinking'} - - -def get_snapshot_status(manila_client, snapshot_id): - return get_resource_status(manila_client.share_snapshots, snapshot_id, - 'snapshot') - - -def get_share_status(manila_client, share_id): - return get_resource_status(manila_client.shares, share_id, 'share') - - -def get_resource_status(resource_manager, resource_id, resource_type): - LOG.debug('Polling %(resource_type)s (id: %(resource_id)s)', - {'resource_type': resource_type, 'resource_id': resource_id}) - try: - resource = resource_manager.get(resource_id) - status = resource.status - except manila_exc.NotFound: - status = 'not-found' - LOG.debug( - 'Polled %(resource_type)s (id: %(resource_id)s) status: %(status)s', - {'resource_type': resource_type, 'resource_id': resource_id, - 'status': status} - ) - return status - - -class ProtectOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(ProtectOperation, self).__init__() - self._interval = poll_interval - - def _create_snapshot(self, manila_client, share_id, snapshot_name, - description, force): - snapshot = manila_client.share_snapshots.create( - share=share_id, - name=snapshot_name, - description=description, - force=force - ) - - snapshot_id = snapshot.id - is_success = utils.status_poll( - partial(get_snapshot_status, manila_client, snapshot_id), - interval=self._interval, - success_statuses={'available'}, - failure_statuses={'error'}, - ignore_statuses={'creating'}, - ignore_unexpected=True - ) - - if not is_success: - try: - snapshot = manila_client.share_snapshots.get(snapshot_id) - except Exception: - reason = 'Unable to find snapshot.' 
- else: - reason = 'The status of snapshot is %s' % snapshot.status - raise exception.CreateResourceFailed( - name="Share Snapshot", - reason=reason, resource_id=share_id, - resource_type=constants.SHARE_RESOURCE_TYPE) - - return snapshot_id - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - share_id = resource.id - bank_section = checkpoint.get_resource_bank_section(share_id) - manila_client = ClientFactory.create_client('manila', context) - LOG.info('creating share snapshot, share_id: %s', share_id) - bank_section.update_object('status', - constants.RESOURCE_STATUS_PROTECTING) - share_info = manila_client.shares.get(share_id) - if share_info.status != "available": - is_success = utils.status_poll( - partial(get_share_status, manila_client, share_id), - interval=self._interval, success_statuses={'available'}, - failure_statuses=SHARE_FAILURE_STATUSES, - ignore_statuses=SHARE_IGNORE_STATUSES, - ) - if not is_success: - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Share Snapshot", - reason='Share is in an error status.', - resource_id=share_id, - resource_type=constants.SHARE_RESOURCE_TYPE, - ) - resource_metadata = { - 'share_id': share_id, - 'size': share_info.size, - 'share_proto': share_info.share_proto, - 'share_type': share_info.share_type, - 'share_network_id': share_info.share_network_id - } - snapshot_name = parameters.get('snapshot_name', None) - description = parameters.get('description', None) - force = parameters.get('force', False) - try: - snapshot_id = self._create_snapshot(manila_client, share_id, - snapshot_name, - description, force) - except exception.CreateResourceFailed as e: - LOG.error('Error creating snapshot (share_id: %(share_id)s)' - ': %(reason)s', {'share_id': share_id, 'reason': e}) - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise - - resource_metadata['snapshot_id'] = snapshot_id - - bank_section.update_object('metadata', resource_metadata) - bank_section.update_object('status', - constants.RESOURCE_STATUS_AVAILABLE) - LOG.info('Created snapshot of share (share_id: %(share_id)s ' - 'snapshot_id: %(snapshot_id)s) successfully', - {'share_id': share_id, 'snapshot_id': snapshot_id}) - - -class RestoreOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(RestoreOperation, self).__init__() - self._interval = poll_interval - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_share_id = resource.id - bank_section = checkpoint.get_resource_bank_section(original_share_id) - manila_client = ClientFactory.create_client('manila', context) - resource_metadata = bank_section.get_object('metadata') - restore_name = parameters.get('restore_name', - '%s@%s' % (checkpoint.id, - original_share_id)) - restore_description = parameters.get('restore_description', None) - snapshot_id = resource_metadata['snapshot_id'] - share_proto = resource_metadata['share_proto'] - size = resource_metadata['size'] - share_type = resource_metadata['share_type'] - share_network_id = resource_metadata['share_network_id'] - restore = kwargs.get('restore') - LOG.info("Restoring a share from snapshot, " - "original_share_id: %s.", original_share_id) - try: - share = manila_client.shares.create( - share_proto, size, snapshot_id=snapshot_id, - name=restore_name, description=restore_description, - share_network=share_network_id, share_type=share_type) - is_success = utils.status_poll( - partial(get_share_status, manila_client, share.id),
- interval=self._interval, success_statuses={'available'}, - failure_statuses=SHARE_FAILURE_STATUSES, - ignore_statuses=SHARE_IGNORE_STATUSES - ) - if is_success is not True: - LOG.error('The status of share is invalid. status:%s', - share.status) - restore.update_resource_status( - constants.SHARE_RESOURCE_TYPE, - share.id, share.status, "Invalid status.") - restore.save() - raise exception.RestoreResourceFailed( - name="Share Snapshot", - reason="Invalid status.", - resource_id=original_share_id, - resource_type=constants.SHARE_RESOURCE_TYPE) - restore.update_resource_status(constants.SHARE_RESOURCE_TYPE, - share.id, share.status) - restore.save() - except Exception as e: - LOG.error("Restore share from snapshot failed, share_id: %s.", - original_share_id) - raise exception.RestoreResourceFailed( - name="Share Snapshot", - reason=e, resource_id=original_share_id, - resource_type=constants.SHARE_RESOURCE_TYPE) - LOG.info("Finished restoring a share from snapshot, share_id: %s.", - original_share_id) - - -class VerifyOperation(protection_plugin.Operation): - def __init__(self): - super(VerifyOperation, self).__init__() - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_share_id = resource.id - bank_section = checkpoint.get_resource_bank_section(original_share_id) - manila_client = ClientFactory.create_client('manila', context) - resource_metadata = bank_section.get_object('metadata') - LOG.info('Verifying the share snapshot, share_id: %s', - original_share_id) - - update_method = partial( - utils.update_resource_verify_result, - kwargs.get('verify'), resource.type, original_share_id) - - snapshot_id = resource_metadata['snapshot_id'] - try: - share_snapshot = manila_client.share_snapshots.get(snapshot_id) - snapshot_status = share_snapshot.status - except Exception as ex: - LOG.error('Getting share snapshot (snapshot_id: %(snapshot_id)s) ' - 'failed: %(reason)s', - {'snapshot_id': snapshot_id, 'reason': ex}) - reason = 'Getting share snapshot failed.' - update_method(constants.RESOURCE_STATUS_ERROR, reason) - raise - - if snapshot_status == 'available': - update_method(constants.RESOURCE_STATUS_AVAILABLE) - else: - reason = ('The status of the share snapshot is %s.' - % snapshot_status) - update_method(snapshot_status, reason) - raise exception.VerifyResourceFailed( - name="Share snapshot", - reason=reason, - resource_id=original_share_id, - resource_type=resource.type - ) - - -class DeleteOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(DeleteOperation, self).__init__() - self._interval = poll_interval - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - snapshot_id = None - try: - bank_section.update_object('status', - constants.RESOURCE_STATUS_DELETING) - resource_metadata = bank_section.get_object('metadata') - snapshot_id = resource_metadata['snapshot_id'] - manila_client = ClientFactory.create_client('manila', context) - try: - snapshot = manila_client.share_snapshots.get(snapshot_id) - manila_client.share_snapshots.delete(snapshot) - except manila_exc.NotFound: - LOG.info('Snapshot id: %s not found. Assuming deleted',
- snapshot_id) - is_success = utils.status_poll( - partial(get_snapshot_status, manila_client, snapshot_id), - interval=self._interval, - success_statuses={'deleted', 'not-found'}, - failure_statuses={'error', 'error_deleting'}, - ignore_statuses={'deleting'}, - ignore_unexpected=True - ) - if not is_success: - raise exception.NotFound() - bank_section.delete_object('metadata') - bank_section.update_object('status', - constants.RESOURCE_STATUS_DELETED) - except Exception as e: - LOG.error('Delete share snapshot failed, snapshot_id: %s', - snapshot_id) - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.DeleteResourceFailed( - name="Share Snapshot", - reason=six.text_type(e), - resource_id=resource_id, - resource_type=constants.SHARE_RESOURCE_TYPE - ) - - -class ManilaSnapshotProtectionPlugin(protection_plugin.ProtectionPlugin): - _SUPPORT_RESOURCE_TYPES = [constants.SHARE_RESOURCE_TYPE] - - def __init__(self, config=None): - super(ManilaSnapshotProtectionPlugin, self).__init__(config) - self._config.register_opts(manila_snapshot_opts, - 'manila_snapshot_plugin') - self._plugin_config = self._config.manila_snapshot_plugin - self._poll_interval = self._plugin_config.poll_interval - - @classmethod - def get_supported_resources_types(cls): - return cls._SUPPORT_RESOURCE_TYPES - - @classmethod - def get_options_schema(cls, resources_type): - return share_schemas.OPTIONS_SCHEMA - - @classmethod - def get_restore_schema(cls, resources_type): - return share_schemas.RESTORE_SCHEMA - - @classmethod - def get_verify_schema(cls, resources_type): - return share_schemas.VERIFY_SCHEMA - - @classmethod - def get_saved_info_schema(cls, resources_type): - return share_schemas.SAVED_INFO_SCHEMA - - @classmethod - def get_saved_info(cls, metadata_store, resource): - pass - - def get_protect_operation(self, resource): - return ProtectOperation(self._poll_interval) - - def get_restore_operation(self, resource): - return RestoreOperation(self._poll_interval) - - def get_verify_operation(self, resource): - return VerifyOperation() - - def get_delete_operation(self, resource): - return DeleteOperation(self._poll_interval) diff --git a/karbor/services/protection/protection_plugins/share/share_snapshot_plugin_schemas.py b/karbor/services/protection/protection_plugins/share/share_snapshot_plugin_schemas.py deleted file mode 100644 index 45138229..00000000 --- a/karbor/services/protection/protection_plugins/share/share_snapshot_plugin_schemas.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -OPTIONS_SCHEMA = { - "title": "Share Protection Options", - "type": "object", - "properties": { - "snapshot_name": { - "type": "string", - "title": "Snapshot Name", - "description": "The name of the snapshot.", - "default": None - }, - "description": { - "type": "string", - "title": "Description", - "description": "The description of the share snapshot." 
- }, - "force": { - "type": "boolean", - "title": "Force", - "description": "Whether to create the snapshot, even if " - "the share is busy", - "default": False - } - }, - "required": ["snapshot_name", "force"] -} - -RESTORE_SCHEMA = { - "title": "Share Protection Restore", - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "title": "Restore Share Name", - "description": "The name of the restore share", - "default": None - }, - }, - "required": ["restore_name"] -} - -VERIFY_SCHEMA = { - "title": "Share snapshot Verify", - "type": "object", - "properties": {} -} - -SAVED_INFO_SCHEMA = { - "title": "Share Protection Saved Info", - "type": "object", - "properties": {}, - "required": [] -} diff --git a/karbor/services/protection/protection_plugins/utils.py b/karbor/services/protection/protection_plugins/utils.py deleted file mode 100644 index 83824906..00000000 --- a/karbor/services/protection/protection_plugins/utils.py +++ /dev/null @@ -1,125 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from io import BytesIO -import os - -from oslo_log import log as logging -from oslo_service import loopingcall - -from karbor.services.protection.bank_plugin import BankIO - -LOG = logging.getLogger(__name__) - - -def backup_image_to_bank(glance_client, image_id, bank_section, object_size): - image_response = glance_client.images.data(image_id, do_checksum=True) - bank_chunk_num = int(object_size / 65536) - - image_chunks_num = 0 - chunks_num = 1 - image_response_data = BytesIO() - for chunk in image_response: - image_response_data.write(chunk) - image_chunks_num += 1 - if image_chunks_num == bank_chunk_num: - image_chunks_num = 0 - image_response_data.seek(0, os.SEEK_SET) - data = image_response_data.read(object_size) - bank_section.update_object("data_" + str(chunks_num), data) - image_response_data.truncate(0) - image_response_data.seek(0, os.SEEK_SET) - chunks_num += 1 - - image_response_data.seek(0, os.SEEK_SET) - data = image_response_data.read() - if data: - bank_section.update_object("data_" + str(chunks_num), data) - else: - chunks_num -= 1 - return chunks_num - - -def restore_image_from_bank(glance_client, bank_section, restore_name): - resource_definition = bank_section.get_object('metadata') - image_metadata = resource_definition['image_metadata'] - objects = [key.split("/")[-1] for key in - bank_section.list_objects() - if (key.split("/")[-1]).startswith("data_")] - - chunks_num = resource_definition.get("chunks_num", 0) - if len(objects) != int(chunks_num): - raise Exception("The chunks num of restored image is invalid") - - sorted_objects = sorted(objects, key=lambda s: int(s[5:])) - image_data = BankIO(bank_section, sorted_objects) - disk_format = image_metadata["disk_format"] - container_format = image_metadata["container_format"] - image = glance_client.images.create( - disk_format=disk_format, - container_format=container_format, - name=restore_name - ) - glance_client.images.upload(image.id, image_data) - image_info = glance_client.images.get(image.id) - if image_info.checksum != image_metadata["checksum"]:
- raise Exception("The checksum of restored image is invalid") - return image_info - - -def update_resource_restore_result(restore_record, resource_type, resource_id, - status, reason=''): - try: - restore_record.update_resource_status(resource_type, resource_id, - status, reason) - restore_record.save() - except Exception: - LOG.error('Unable to update restoration result. ' - 'resource type: %(resource_type)s, ' - 'resource id: %(resource_id)s, ' - 'status: %(status)s, reason: %(reason)s', - {'resource_type': resource_type, 'resource_id': resource_id, - 'status': status, 'reason': reason}) - - -def status_poll(get_status_func, interval, success_statuses=set(), - failure_statuses=set(), ignore_statuses=set(), - ignore_unexpected=False): - def _poll(): - status = get_status_func() - if status in success_statuses: - raise loopingcall.LoopingCallDone(retvalue=True) - if status in failure_statuses: - raise loopingcall.LoopingCallDone(retvalue=False) - if status in ignore_statuses: - return - if ignore_unexpected is False: - raise loopingcall.LoopingCallDone(retvalue=False) - - loop = loopingcall.FixedIntervalLoopingCall(_poll) - return loop.start(interval=interval, initial_delay=interval).wait() - - -def update_resource_verify_result(verify_record, resource_type, resource_id, - status, reason=''): - try: - verify_record.update_resource_status(resource_type, resource_id, - status, reason) - verify_record.save() - except Exception: - LOG.error('Unable to update verify result. ' - 'resource type: %(resource_type)s, ' - 'resource id: %(resource_id)s, ' - 'status: %(status)s, reason: %(reason)s', - {'resource_type': resource_type, 'resource_id': resource_id, - 'status': status, 'reason': reason}) - raise diff --git a/karbor/services/protection/protection_plugins/volume/__init__.py b/karbor/services/protection/protection_plugins/volume/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/services/protection/protection_plugins/volume/cinder_protection_plugin.py b/karbor/services/protection/protection_plugins/volume/cinder_protection_plugin.py deleted file mode 100644 index 3d2bcf91..00000000 --- a/karbor/services/protection/protection_plugins/volume/cinder_protection_plugin.py +++ /dev/null @@ -1,446 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
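`status_poll` in the utils module deleted above is the wait loop every plugin in this patch leans on: it invokes a zero-argument status callable on a fixed interval until the status lands in the success or failure set (returning True or False), treating statuses in `ignore_statuses` as transient and, when `ignore_unexpected=True`, tolerating anything else as well. A minimal usage sketch, assuming karbor and oslo.service are installed so the import resolves; the canned status sequence is invented:

```python
# Minimal sketch of driving utils.status_poll (assumes karbor and
# oslo.service are installed). A canned status sequence stands in for a
# real Cinder/Manila/Glance poll such as partial(get_backup_status, ...).
from functools import partial

from karbor.services.protection.protection_plugins import utils

_statuses = iter(['creating', 'creating', 'available'])


def fake_get_status(resource_id):
    # Returns the next canned status; 'error' once the sequence runs out.
    return next(_statuses, 'error')


is_success = utils.status_poll(
    partial(fake_get_status, 'fake-resource-id'),
    interval=1,
    success_statuses={'available'},
    failure_statuses={'error', 'not-found'},
    ignore_statuses={'creating'},
)
assert is_success  # True: polling stopped on 'available'
```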
- -from functools import partial -import six - -from cinderclient import exceptions as cinder_exc -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import constants -from karbor import exception -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protection_plugin -from karbor.services.protection.protection_plugins import utils -from karbor.services.protection.protection_plugins.volume \ - import volume_plugin_cinder_schemas as cinder_schemas - - -LOG = logging.getLogger(__name__) - -cinder_backup_opts = [ - cfg.IntOpt( - 'poll_interval', default=15, - help='Poll interval for Cinder backup status' - ), - cfg.BoolOpt( - 'backup_from_snapshot', default=True, - help='First take a snapshot of the volume, and back up from ' - 'it. Minimizes the time the volume is unavailable.' - ), -] - - -def get_backup_status(cinder_client, backup_id): - return get_resource_status(cinder_client.backups, backup_id, 'backup') - - -def get_volume_status(cinder_client, volume_id): - return get_resource_status(cinder_client.volumes, volume_id, 'volume') - - -def get_snapshot_status(cinder_client, snapshot_id): - return get_resource_status(cinder_client.volume_snapshots, snapshot_id, - 'snapshot') - - -def get_resource_status(resource_manager, resource_id, resource_type): - LOG.debug('Polling %(resource_type)s (id: %(resource_id)s)', { - 'resource_type': resource_type, - 'resource_id': resource_id, - }) - try: - resource = resource_manager.get(resource_id) - status = resource.status - except cinder_exc.NotFound: - status = 'not-found' - LOG.debug( - 'Polled %(resource_type)s (id: %(resource_id)s) status: %(status)s', - { - 'resource_type': resource_type, - 'resource_id': resource_id, - 'status': status - } - ) - return status - - -class ProtectOperation(protection_plugin.Operation): - def __init__(self, poll_interval, backup_from_snapshot): - super(ProtectOperation, self).__init__() - self._interval = poll_interval - self._backup_from_snapshot = backup_from_snapshot - self.snapshot_id = None - - def _create_snapshot(self, cinder_client, volume_id): - snapshot = cinder_client.volume_snapshots.create(volume_id, force=True) - - snapshot_id = snapshot.id - is_success = utils.status_poll( - partial(get_snapshot_status, cinder_client, snapshot_id), - interval=self._interval, - success_statuses={'available', }, - failure_statuses={'error', 'error_deleting', 'deleting', - 'not-found'}, - ignore_statuses={'creating', }, - ) - if not is_success: - raise Exception('Failed to create snapshot of volume %s' - % volume_id) - - return snapshot_id - - def _delete_snapshot(self, cinder_client, snapshot_id): - LOG.info('Cleaning up snapshot (snapshot_id: %s)', snapshot_id) - cinder_client.volume_snapshots.delete(snapshot_id) - return utils.status_poll( - partial(get_snapshot_status, cinder_client, snapshot_id), - interval=self._interval, - success_statuses={'not-found', }, - failure_statuses={'error', 'error_deleting', 'creating'}, - ignore_statuses={'deleting', }, - ) - - def _create_backup(self, cinder_client, volume_id, backup_name, - description, snapshot_id=None, incremental=False, - container=None, force=False): - backup = cinder_client.backups.create( - volume_id=volume_id, - name=backup_name, - description=description, - force=force, - snapshot_id=snapshot_id, - incremental=incremental, - container=container - ) - - backup_id = backup.id - is_success = utils.status_poll( - partial(get_backup_status, cinder_client, backup_id), - interval=self._interval, - success_statuses={'available'},
- failure_statuses={'error'}, - ignore_statuses={'creating'}, - ) - - if not is_success: - try: - backup = cinder_client.backups.get(backup_id) - except Exception: - reason = 'Unable to find backup' - else: - reason = backup.fail_reason - raise Exception(reason) - - return backup_id - - def on_prepare_finish(self, checkpoint, resource, context, parameters, - **kwargs): - volume_id = resource.id - if not self._backup_from_snapshot: - LOG.info('Skipping taking snapshot of volume %s - backing up ' - 'directly', volume_id) - return - - LOG.info('Taking snapshot of volume %s', volume_id) - bank_section = checkpoint.get_resource_bank_section(volume_id) - bank_section.update_object('status', - constants.RESOURCE_STATUS_PROTECTING) - cinder_client = ClientFactory.create_client('cinder', context) - try: - self.snapshot_id = self._create_snapshot(cinder_client, volume_id) - except Exception: - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Volume Backup", - reason='Error creating snapshot for volume', - resource_id=volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE, - ) - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - volume_id = resource.id - bank_section = checkpoint.get_resource_bank_section(volume_id) - cinder_client = ClientFactory.create_client('cinder', context) - LOG.info('creating volume backup, volume_id: %s', volume_id) - bank_section.update_object('status', - constants.RESOURCE_STATUS_PROTECTING) - resource_metadata = { - 'volume_id': volume_id, - } - is_success = utils.status_poll( - partial(get_volume_status, cinder_client, volume_id), - interval=self._interval, - success_statuses={'available', 'in-use', 'error_extending', - 'error_restoring'}, - failure_statuses={'error', 'error_deleting', 'deleting', - 'not-found'}, - ignore_statuses={'attaching', 'creating', 'backing-up', - 'restoring-backup'}, - ) - if not is_success: - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Volume Backup", - reason='Volume is in erroneous state', - resource_id=volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE, - ) - - backup_name = parameters.get('backup_name', None) - description = parameters.get('description', None) - backup_mode = parameters.get('backup_mode', "full") - container = parameters.get('container', None) - force = parameters.get('force', False) - incremental = False - if backup_mode == "incremental": - incremental = True - elif backup_mode == "full": - incremental = False - - try: - backup_id = self._create_backup(cinder_client, volume_id, - backup_name, description, - self.snapshot_id, - incremental, container, force) - except Exception as e: - LOG.error('Error creating backup (volume_id: %(volume_id)s ' - 'snapshot_id: %(snapshot_id)s): %(reason)s', - {'volume_id': volume_id, - 'snapshot_id': self.snapshot_id, - 'reason': e} - ) - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Volume Backup", - reason=e, - resource_id=volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE, - ) - - resource_metadata['backup_id'] = backup_id - bank_section.update_object('metadata', resource_metadata) - bank_section.update_object('status', - constants.RESOURCE_STATUS_AVAILABLE) - LOG.info('Backed up volume (volume_id: %(volume_id)s snapshot_id: ' - '%(snapshot_id)s backup_id: %(backup_id)s) successfully', - {'backup_id': backup_id, - 'snapshot_id': self.snapshot_id,
- 'volume_id': volume_id} - ) - - if self.snapshot_id: - try: - self._delete_snapshot(cinder_client, self.snapshot_id) - except Exception as e: - LOG.warning('Failed deleting snapshot: %(snapshot_id)s. ' - 'Reason: %(reason)s', - {'snapshot_id': self.snapshot_id, 'reason': e}) - - -class RestoreOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(RestoreOperation, self).__init__() - self._interval = poll_interval - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - resource_metadata = bank_section.get_object('metadata') - cinder_client = ClientFactory.create_client('cinder', context) - - # create volume - volume_property = { - 'name': parameters.get( - 'restore_name', '%s@%s' % (checkpoint.id, resource_id)) - } - if 'restore_description' in parameters: - volume_property['description'] = parameters['restore_description'] - backup_id = resource_metadata['backup_id'] - try: - volume_id = cinder_client.restores.restore(backup_id).volume_id - cinder_client.volumes.update(volume_id, **volume_property) - except Exception as ex: - LOG.error('Error creating volume (backup_id: %(backup_id)s): ' - '%(reason)s', - {'backup_id': backup_id, - 'reason': ex}) - raise - - # check and update status - update_method = partial( - utils.update_resource_restore_result, - kwargs.get('restore'), resource.type, volume_id) - - update_method(constants.RESOURCE_STATUS_RESTORING) - - is_success = self._check_create_complete(cinder_client, volume_id) - if is_success: - update_method(constants.RESOURCE_STATUS_AVAILABLE) - kwargs.get("new_resources")[resource_id] = volume_id - else: - reason = 'Error creating volume' - update_method(constants.RESOURCE_STATUS_ERROR, reason) - - raise exception.RestoreResourceFailed( - name="Volume Backup", - reason=reason, - resource_id=resource_id, - resource_type=resource.type - ) - - def _check_create_complete(self, cinder_client, volume_id): - return utils.status_poll( - partial(get_volume_status, cinder_client, volume_id), - interval=self._interval, - success_statuses={'available'}, - failure_statuses={'error', 'not-found'}, - ignore_statuses={'creating', 'restoring-backup', 'downloading'}, - ) - - -class VerifyOperation(protection_plugin.Operation): - def __init__(self): - super(VerifyOperation, self).__init__() - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - resource_metadata = bank_section.get_object('metadata') - cinder_client = ClientFactory.create_client('cinder', context) - LOG.info('Verifying the volume backup, volume_id: %s', resource_id) - - update_method = partial( - utils.update_resource_verify_result, - kwargs.get('verify'), resource.type, resource_id) - - backup_id = resource_metadata['backup_id'] - try: - volume_backup = cinder_client.backups.get(backup_id) - backup_status = volume_backup.status - except Exception as ex: - LOG.error('Error getting volume backup (backup_id: ' - '%(backup_id)s): %(reason)s', - {'backup_id': backup_id, 'reason': ex}) - reason = 'Error getting volume backup.' - update_method(constants.RESOURCE_STATUS_ERROR, reason) - raise - - if backup_status == 'available': - update_method(constants.RESOURCE_STATUS_AVAILABLE) - else: - reason = ( - 'The status of the volume backup is %s.' 
% backup_status) - update_method(backup_status, reason) - raise exception.VerifyResourceFailed( - name="Volume Backup", - reason=reason, - resource_id=resource_id, - resource_type=resource.type - ) - - -class DeleteOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(DeleteOperation, self).__init__() - self._interval = poll_interval - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - backup_id = None - try: - bank_section.update_object('status', - constants.RESOURCE_STATUS_DELETING) - resource_metadata = bank_section.get_object('metadata') - backup_id = resource_metadata['backup_id'] - cinder_client = ClientFactory.create_client('cinder', context) - try: - backup = cinder_client.backups.get(backup_id) - cinder_client.backups.delete(backup) - except cinder_exc.NotFound: - LOG.info('Backup id: %s not found. Assuming deleted', - backup_id) - is_success = utils.status_poll( - partial(get_backup_status, cinder_client, backup_id), - interval=self._interval, - success_statuses={'deleted', 'not-found'}, - failure_statuses={'error', 'error_deleting'}, - ignore_statuses={'deleting'}, - ) - if not is_success: - raise exception.NotFound() - bank_section.delete_object('metadata') - bank_section.update_object('status', - constants.RESOURCE_STATUS_DELETED) - except Exception as e: - LOG.error('delete volume backup failed, backup_id: %s', backup_id) - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.DeleteResourceFailed( - name="Volume Backup", - reason=six.text_type(e), - resource_id=resource_id, - resource_type=constants.VOLUME_RESOURCE_TYPE - ) - - -class CinderBackupProtectionPlugin(protection_plugin.ProtectionPlugin): - _SUPPORT_RESOURCE_TYPES = [constants.VOLUME_RESOURCE_TYPE] - - def __init__(self, config=None): - super(CinderBackupProtectionPlugin, self).__init__(config) - self._config.register_opts(cinder_backup_opts, - 'cinder_backup_protection_plugin') - self._plugin_config = self._config.cinder_backup_protection_plugin - self._poll_interval = self._plugin_config.poll_interval - self._backup_from_snapshot = self._plugin_config.backup_from_snapshot - - @classmethod - def get_supported_resources_types(cls): - return cls._SUPPORT_RESOURCE_TYPES - - @classmethod - def get_options_schema(cls, resources_type): - return cinder_schemas.OPTIONS_SCHEMA - - @classmethod - def get_restore_schema(cls, resources_type): - return cinder_schemas.RESTORE_SCHEMA - - @classmethod - def get_verify_schema(cls, resources_type): - return cinder_schemas.VERIFY_SCHEMA - - @classmethod - def get_saved_info_schema(cls, resources_type): - return cinder_schemas.SAVED_INFO_SCHEMA - - @classmethod - def get_saved_info(cls, metadata_store, resource): - # TODO(hurong) - pass - - def get_protect_operation(self, resource): - return ProtectOperation(self._poll_interval, - self._backup_from_snapshot) - - def get_restore_operation(self, resource): - return RestoreOperation(self._poll_interval) - - def get_verify_operation(self, resource): - return VerifyOperation() - - def get_delete_operation(self, resource): - return DeleteOperation(self._poll_interval) diff --git a/karbor/services/protection/protection_plugins/volume/volume_freezer_plugin.py b/karbor/services/protection/protection_plugins/volume/volume_freezer_plugin.py deleted file mode 100644 index fa71bd5b..00000000 --- 
a/karbor/services/protection/protection_plugins/volume/volume_freezer_plugin.py +++ /dev/null @@ -1,471 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from functools import partial -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import constants -from karbor import exception -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protection_plugin -from karbor.services.protection.protection_plugins import utils -from karbor.services.protection.protection_plugins.volume import \ - volume_freezer_plugin_schemas - -LOG = logging.getLogger(__name__) - -freezer_backup_opts = [ - cfg.IntOpt( - 'poll_interval', default=20, - help='Poll interval for Freezer Backup Resource status.' - ), - cfg.StrOpt( - 'scheduler_client_id', default=None, - help='The freezer scheduler client id to schedule the jobs' - ), - cfg.StrOpt( - 'container', default='karbor', - help='The container for Freezer backup storage.' - ), - cfg.StrOpt( - 'storage', default='swift', - help='The storage type for Freezer backup storage.' - ), - cfg.StrOpt( - 'ssh_key', - help='The ssh key for Freezer ssh driver.' - ), - cfg.StrOpt( - 'ssh_username', - help='The ssh user name for Freezer ssh driver.' - ), - cfg.StrOpt( - 'ssh_host', - help='The ssh host for Freezer ssh driver.' - ), - cfg.StrOpt( - 'ssh_port', - help='The ssh port for Freezer ssh driver.' - ), - cfg.StrOpt( - 'endpoint', - help='The storage endpoint for Freezer S3 driver.' - ), - cfg.StrOpt( - 'access_key', - help='The storage access key for Freezer S3 driver.' - ), - cfg.StrOpt( - 'secret_key', - help='The storage secret key for Freezer S3 driver.' 
- ) -] - - -def get_job_status(freezer_job_operation, job_id): - LOG.debug('Polling freezer job status, job_id: {0}'.format(job_id)) - job_status = freezer_job_operation.get_status(job_id) - LOG.debug('Polled freezer job status, job_id: {0}, job_status: {1}'.format( - job_id, job_status - )) - return job_status - - -class FreezerStorage(object): - def __init__(self, storage_type, storage_path, **kwargs): - self.storage_type = storage_type - self.storage_path = storage_path - self.config = kwargs - - def get_storage(self): - - storage = { - 'storage': self.storage_type, - 'container': self.storage_path - } - - if self.storage_type == 's3': - storage['endpoint'] = self.config.get('endpoint', None) - storage['access_key'] = self.config.get('access_key', None) - storage['secret_key'] = self.config.get('secret_key', None) - - if self.storage_type == 'ssh': - storage['ssh_key'] = self.config.get('ssh_key', None) - storage['ssh_port'] = self.config.get('ssh_port', None) - storage['ssh_username'] = self.config.get('ssh_username', None) - storage['ssh_host'] = self.config.get('ssh_host', None) - - return storage - - -class FreezerTask(object): - def __init__(self, context): - self.context = context - self.client = ClientFactory.create_client('freezer', self.context) - - def _client(self): - return self.client - - def get(self, job_id): - return self._client().jobs.get(job_id) - - def get_status(self, job_id): - return self._client().jobs.get(job_id).get('job_schedule', - {}).get('result') - - def create(self, backup_name, storage, description, resource, - action_type, scheduler_client_id): - return self._build(backup_name, storage, description, - resource, action_type, scheduler_client_id) - - def create_delete_job(self, job): - for job_action in job['job_actions']: - job_action['freezer_action']['action'] = 'admin' - job_action['freezer_action']['remove_older_than'] = '-1' - job_id = self._client().jobs.create(job) - self._client().jobs.start_job(job_id) - return job_id, job - - def create_restore_job(self, job): - for job_action in job['job_actions']: - job_action['freezer_action']['action'] = 'restore' - job_id = self._client().jobs.create(job) - self._client().jobs.start_job(job_id) - return job_id, job - - def delete(self, job_id): - actions = self.actions(job_id) - for action in actions: - self._client().actions.delete(action.get('action_id')) - return self._client().jobs.delete(job_id) - - def actions(self, job_id): - job = self.get(job_id) - if not job: - return [] - return job.get('job_actions', []) - - def _build(self, backup_name, storage, description, - resource, action_type, scheduler_client_id): - client_id = scheduler_client_id if scheduler_client_id else \ - FreezerSchedulerClient(self._client()).get_random_client_id() - job = { - 'description': resource.id if not description else description, - 'job_actions': [self._build_action( - backup_name=backup_name, - storage=storage, - resource=resource, - action_type=action_type, - )], - 'client_id': client_id - } - - job_id = self._client().jobs.create(job) - self._client().jobs.start_job(job_id) - return job_id, job - - @staticmethod - def _build_action(backup_name, storage, resource, action_type): - backup_name = backup_name.replace(' ', '_') - action = { - 'backup_name': backup_name, - 'action': action_type, - 'mode': 'cinder', - 'cinder_vol_id': resource.id - } - - action = dict(action, **storage.get_storage()) - - if action_type == 'admin': - action['remove_older_than'] = '-1' - - return {'freezer_action': action} - - -class 
FreezerSchedulerClient(object): - """Freezer scheduler to schedule the jobs. - - All freezer scheduler clients should be able to schedule jobs - whose resource type is a Nova instance or a Cinder volume. - """ - - def __init__(self, freezer_client): - self.client = freezer_client - - def get_random_client_id(self): - clients = self.client.clients.list() - if len(clients) < 1: - raise Exception('No freezer-scheduler client exists') - client_index = random.randint(0, len(clients) - 1) - return [ - c.get('client', {}).get('client_id') for c in clients - ][client_index] - - -class ProtectOperation(protection_plugin.Operation): - def __init__(self, poll_interval, freezer_storage, scheduler_client_id): - super(ProtectOperation, self).__init__() - self._poll_interval = poll_interval - self._scheduler_client_id = scheduler_client_id - self.freezer_storage = freezer_storage - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - - LOG.info('Creating freezer protection backup, resource_id: {0}' - .format(resource_id)) - bank_section.update_object('status', - constants.RESOURCE_STATUS_PROTECTING) - - backup_name = parameters.get('backup_name', 'backup{0}' - .format(resource_id)) - description = parameters.get('description', None) - self.freezer_storage.storage_path = "{0}/{1}".format( - self.freezer_storage.storage_path, checkpoint.id) - job_id, job_info = None, None - freezer_task = FreezerTask(context) - try: - job_id, job_info = freezer_task.create( - backup_name=backup_name, - storage=self.freezer_storage, - description=description, - resource=resource, - action_type='backup', - scheduler_client_id=self._scheduler_client_id - ) - LOG.debug('Creating freezer backup job successful, job_id: {0}' - .format(job_id)) - is_success = utils.status_poll( - partial(get_job_status, freezer_task, job_id), - interval=self._poll_interval, - success_statuses={'success'}, - failure_statuses={'fail'}, - ignore_statuses={'aborted', ''}, - ignore_unexpected=True - ) - - if is_success is not True: - LOG.error("The status of freezer job (id: {0}) is invalid." 
- .format(job_id)) - raise exception.CreateResourceFailed( - name="Freezer Backup FreezerTask", - reason="The status of freezer job is invalid.", - resource_id=resource_id, - resource_type=resource.type) - - resource_definition = { - 'job_id': job_id, - 'job_info': job_info - } - - bank_section.update_object("metadata", resource_definition) - - bank_section.update_object("status", - constants.RESOURCE_STATUS_AVAILABLE) - - except exception.CreateResourceFailed as e: - LOG.error('Error creating backup (resource_id: {0}, reason: {1})' - .format(resource_id, e)) - if job_id: - freezer_task.delete(job_id) - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise - LOG.debug('Finished creating freezer backup resource') - freezer_task.delete(job_id) - - -class RestoreOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(RestoreOperation, self).__init__() - self._poll_interval = poll_interval - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - LOG.info("Restoring freezer protection backup, resource_id: {0}" - .format(resource_id)) - - resource_metadata = bank_section.get_object('metadata') - freezer_job_info = resource_metadata.get('job_info', None) - if not freezer_job_info: - raise exception.RestoreResourceFailed( - name='Freezer Backup FreezerTask', - reason='The content of freezer job is invalid.', - resource_id=resource_id, - resource_type=resource.type - ) - freezer_task = FreezerTask(context) - job_id, job_info = None, None - try: - job_id, job_info = freezer_task.create_restore_job( - freezer_job_info - ) - is_success = utils.status_poll( - partial(get_job_status, freezer_task, job_id), - interval=self._poll_interval, - success_statuses={'success'}, - failure_statuses={'fail'}, - ignore_statuses={'aborted', ''}, - ignore_unexpected=True - ) - - if is_success is not True: - LOG.error("The status of freezer job (id: {0}) is invalid." 
- .format(job_id)) - raise exception.RestoreResourceFailed( - name="Freezer Backup FreezerTask", - reason="The status of freezer job is invalid.", - resource_id=resource_id, - resource_type=resource.type - ) - - except Exception as e: - LOG.error("Restore freezer backup resource failed, resource_type:" - "{0}, resource_id: {1}" - .format(resource.type, resource.id)) - if job_id: - freezer_task.delete(job_id) - raise exception.RestoreResourceFailed( - name="Freezer Backup FreezerTask", - reason=e, - resource_id=resource_id, - resource_type=resource.type - ) - LOG.debug('Finished restoring freezer backup resource') - freezer_task.delete(job_id) - - -class DeleteOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(DeleteOperation, self).__init__() - self._poll_interval = poll_interval - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - LOG.info("Deleting freezer protection backup, resource_id: {0}" - .format(resource_id)) - - bank_section.update_object('status', - constants.RESOURCE_STATUS_DELETING) - resource_metadata = bank_section.get_object('metadata') - freezer_task_info = resource_metadata.get('job_info', None) - if not freezer_task_info: - raise exception.DeleteResourceFailed( - name='Freezer Backup FreezerTask', - reason='The content of freezer job is invalid.', - resource_id=resource_id, - resource_type=resource.type - ) - - freezer_job_operation = FreezerTask(context) - job_id, job_info = None, None - try: - job_id, job_info = freezer_job_operation.create_delete_job( - freezer_task_info - ) - - is_success = utils.status_poll( - partial(get_job_status, freezer_job_operation, job_id), - interval=self._poll_interval, - success_statuses={'success'}, - failure_statuses={'fail'}, - ignore_statuses={'aborted', ''}, - ignore_unexpected=True - ) - - if is_success is not True: - LOG.error("The status of freezer job (id: {0}) is invalid." 
- .format(job_id)) - raise exception.DeleteResourceFailed( - name="Freezer Backup FreezerTask", - reason="The status of freezer job is invalid.", - resource_id=resource_id, - resource_type=resource.type - ) - except Exception as e: - LOG.error("Delete freezer backup resource failed, resource_type:" - "{0}, resource_id: {1}" - .format(resource.type, resource.id)) - if job_id: - freezer_job_operation.delete(job_id) - raise exception.DeleteResourceFailed( - name="Freezer Backup FreezerTask", - reason=e, - resource_id=resource_id, - resource_type=resource.type - ) - LOG.debug('Finished deleting freezer backup resource') - bank_section.delete_object('metadata') - bank_section.update_object('status', - constants.RESOURCE_STATUS_DELETED) - freezer_job_operation.delete(job_id) - - -class FreezerProtectionPlugin(protection_plugin.ProtectionPlugin): - _SUPPORT_RESOURCE_TYPES = [constants.VOLUME_RESOURCE_TYPE] - - def __init__(self, config=None): - super(FreezerProtectionPlugin, self).__init__(config) - self._config.register_opts(freezer_backup_opts, - 'freezer_protection_plugin') - self._plugin_config = self._config.freezer_protection_plugin - self._poll_interval = self._plugin_config.poll_interval - self._scheduler_client_id = self._plugin_config.scheduler_client_id - self._freezer_storage = FreezerStorage( - storage_type=self._plugin_config.storage, - storage_path=self._plugin_config.container, - endpoint=self._plugin_config.endpoint, - access_key=self._plugin_config.access_key, - secret_key=self._plugin_config.secret_key, - ssh_key=self._plugin_config.ssh_key, - ssh_port=self._plugin_config.ssh_port, - ssh_username=self._plugin_config.ssh_username, - ssh_host=self._plugin_config.ssh_host - ) - - @classmethod - def get_supported_resources_types(cls): - return cls._SUPPORT_RESOURCE_TYPES - - @classmethod - def get_options_schema(cls, resource_type): - return volume_freezer_plugin_schemas.OPTIONS_SCHEMA - - @classmethod - def get_restore_schema(cls, resource_type): - return volume_freezer_plugin_schemas.RESTORE_SCHEMA - - @classmethod - def get_saved_info_schema(cls, resource_type): - return volume_freezer_plugin_schemas.SAVED_INFO_SCHEMA - - @classmethod - def get_saved_info(cls, metadata_store, resource): - pass - - def get_protect_operation(self, resource): - return ProtectOperation(self._poll_interval, - self._freezer_storage, - self._scheduler_client_id - ) - - def get_restore_operation(self, resource): - return RestoreOperation(self._poll_interval) - - def get_delete_operation(self, resource): - return DeleteOperation(self._poll_interval) diff --git a/karbor/services/protection/protection_plugins/volume/volume_freezer_plugin_schemas.py b/karbor/services/protection/protection_plugins/volume/volume_freezer_plugin_schemas.py deleted file mode 100644 index 82409351..00000000 --- a/karbor/services/protection/protection_plugins/volume/volume_freezer_plugin_schemas.py +++ /dev/null @@ -1,51 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
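`FreezerTask._build` and `_build_action` above assemble the job document that is posted to the freezer API and started on a scheduler client: a single `job_actions` entry whose `freezer_action` merges the backup parameters with the storage dict from `FreezerStorage.get_storage()`. A sketch of the resulting payload for a volume backup to the default swift storage; every id and name here is invented for illustration:

```python
# Sketch of the freezer job document FreezerTask._build produces for a
# cinder volume backup with the default swift storage. All ids/names are
# made up.
volume_id = '11111111-2222-3333-4444-555555555555'

storage = {
    'storage': 'swift',                     # FreezerStorage.storage_type
    'container': 'karbor/<checkpoint_id>',  # storage_path joined with checkpoint.id
}

freezer_action = dict({
    'backup_name': 'my_backup',             # spaces are replaced with '_'
    'action': 'backup',
    'mode': 'cinder',
    'cinder_vol_id': volume_id,
}, **storage)

job = {
    'description': volume_id,               # resource id unless a description is given
    'job_actions': [{'freezer_action': freezer_action}],
    'client_id': 'made-up-scheduler-client-id',
}
```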
- -OPTIONS_SCHEMA = { - "title": "Freezer Protection Options", - "type": "object", - "properties": { - "backup_name": { - "type": "string", - "title": "Backup Name", - "description": "The name of the backup.", - "default": None - }, - "description": { - "type": "string", - "title": "Description", - "description": "The description of the backup." - } - }, - "required": ["backup_name"] -} - -RESTORE_SCHEMA = { - "title": "Freezer Protection Restore", - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "title": "Restore Resource Name", - "description": "The name of the restore resource", - "default": None - }, - }, - "required": ["restore_name"] -} - -SAVED_INFO_SCHEMA = { - "title": "Freezer Protection Saved Info", - "type": "object", - "properties": {}, - "required": [] -} diff --git a/karbor/services/protection/protection_plugins/volume/volume_glance_plugin.py b/karbor/services/protection/protection_plugins/volume/volume_glance_plugin.py deleted file mode 100644 index bd54293f..00000000 --- a/karbor/services/protection/protection_plugins/volume/volume_glance_plugin.py +++ /dev/null @@ -1,524 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from functools import partial - -from cinderclient import exceptions as cinder_exc -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import constants -from karbor import exception -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protection_plugin -from karbor.services.protection.protection_plugins import utils -from karbor.services.protection.protection_plugins.volume \ - import volume_glance_plugin_schemas as volume_schemas - -LOG = logging.getLogger(__name__) - -volume_glance_opts = [ - cfg.IntOpt( - 'poll_interval', default=15, - help='Poll interval for Cinder volume status.' - ), - cfg.BoolOpt( - 'backup_from_snapshot', default=True, - help='First take a snapshot of the volume, and back up from ' - 'it. Minimizes the time the volume is unavailable.' - ), - cfg.IntOpt('backup_image_object_size', - default=65536 * 512, - help='The size in bytes of temporary image objects. '
- 'The value must be a multiple of 65536 ' - '(the size of an image chunk).'), -] - -VOLUME_SUCCESS_STATUSES = {'available', 'in-use', - 'error_extending', 'error_restoring'} - -VOLUME_FAILURE_STATUSES = {'error', 'error_deleting', 'deleting', - 'not-found'} - -VOLUME_IGNORE_STATUSES = {'attaching', 'creating', 'backing-up', - 'restoring-backup', 'uploading', 'downloading'} - - -def get_snapshot_status(cinder_client, snapshot_id): - return get_resource_status(cinder_client.volume_snapshots, snapshot_id, - 'snapshot') - - -def get_volume_status(cinder_client, volume_id): - return get_resource_status(cinder_client.volumes, volume_id, 'volume') - - -def get_image_status(glance_client, image_id): - LOG.debug('Polling image (image_id: %s)', image_id) - try: - status = glance_client.images.get(image_id)['status'] - except exception.NotFound: - status = 'not-found' - LOG.debug('Polled image (image_id: %s) status: %s', - image_id, status) - return status - - -def get_resource_status(resource_manager, resource_id, resource_type): - LOG.debug('Polling %(resource_type)s (id: %(resource_id)s)', - {'resource_type': resource_type, 'resource_id': resource_id}) - try: - resource = resource_manager.get(resource_id) - status = resource.status - except cinder_exc.NotFound: - status = 'not-found' - LOG.debug( - 'Polled %(resource_type)s (id: %(resource_id)s) status: %(status)s', - {'resource_type': resource_type, 'resource_id': resource_id, - 'status': status} - ) - return status - - -class ProtectOperation(protection_plugin.Operation): - def __init__(self, poll_interval, backup_from_snapshot, image_object_size): - super(ProtectOperation, self).__init__() - self._interval = poll_interval - self._backup_from_snapshot = backup_from_snapshot - self._image_object_size = image_object_size - - def _create_snapshot(self, cinder_client, volume_id): - LOG.info("Start creating snapshot of volume({0}).".format(volume_id)) - snapshot = cinder_client.volume_snapshots.create( - volume_id, - name='temporary_snapshot_of_{0}'.format(volume_id), - force=True - ) - - snapshot_id = snapshot.id - is_success = utils.status_poll( - partial(get_snapshot_status, cinder_client, snapshot_id), - interval=self._interval, - success_statuses={'available', }, - failure_statuses={'error', 'error_deleting', 'deleting', - 'not-found'}, - ignore_statuses={'creating', }, - ) - if is_success is not True: - try: - snapshot = cinder_client.volume_snapshots.get(snapshot_id) - except Exception: - reason = 'Unable to find volume snapshot.' 
- else: - reason = 'The status of snapshot is %s' % snapshot.status - raise exception.CreateResourceFailed( - name="Volume Glance Backup", - reason=reason, - resource_id=volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE - ) - LOG.info("Create snapshot of volume({0}) success, " - "snapshot_id({1})".format(volume_id, snapshot_id)) - return snapshot_id - - def _create_temporary_volume(self, cinder_client, snapshot_id): - LOG.info("Start creating volume from snapshot({0}) success" - "".format(snapshot_id)) - snapshot = cinder_client.volume_snapshots.get(snapshot_id) - volume = cinder_client.volumes.create( - size=snapshot.size, - snapshot_id=snapshot_id, - name='temporary_volume_of_{0}'.format(snapshot_id) - ) - is_success = utils.status_poll( - partial(get_volume_status, cinder_client, volume.id), - interval=self._interval, - success_statuses=VOLUME_SUCCESS_STATUSES, - failure_statuses=VOLUME_FAILURE_STATUSES, - ignore_statuses=VOLUME_IGNORE_STATUSES, - ) - volume = cinder_client.volumes.get(volume.id) - if is_success is not True: - LOG.error('The status of temporary volume is invalid. status:%s', - volume.status) - reason = 'Invalid status: %s of temporary volume.' % volume.status - raise exception.CreateResourceFailed( - name="Volume Glance Backup", - reason=reason, - resource_id=volume.id, - resource_type=constants.VOLUME_RESOURCE_TYPE, - ) - LOG.info("Create volume from snapshot({0}) success, " - "volume({1})".format(snapshot_id, volume.id)) - return volume - - def _create_temporary_image(self, cinder_client, glance_client, - temporary_volume): - LOG.info("Start creating image from volume({0})." - "".format(temporary_volume.id)) - image = cinder_client.volumes.upload_to_image( - volume=temporary_volume, - force=True, - image_name='temporary_image_of_{0}'.format(temporary_volume.id), - container_format="bare", - disk_format="raw", - visibility="private", - protected=False - ) - image_id = image[1]['os-volume_upload_image']['image_id'] - is_success = utils.status_poll( - partial(get_image_status, glance_client, image_id), - interval=self._interval, success_statuses={'active'}, - ignore_statuses={'queued', 'saving'}, - failure_statuses={'killed', 'deleted', 'pending_delete', - 'deactivated', 'NotFound'} - ) - image_info = glance_client.images.get(image_id) - if is_success is not True: - LOG.error("The status of image (id: %s) is invalid.", - image_id) - reason = "Invalid status: %s of temporary image." 
% \ - image_info.status - raise exception.CreateResourceFailed( - name="Volume Glance Backup", - reason=reason, - resource_id=image_id, - resource_type=constants.IMAGE_RESOURCE_TYPE) - LOG.info("Create image({0}) from volume({1}) " - "success.".format(image_id, temporary_volume.id)) - return image_id - - def _backup_temporary_image(self, glance_client, image_id, bank_section): - try: - chunks_num = utils.backup_image_to_bank( - glance_client, - image_id, - bank_section, - self._image_object_size - ) - image_info = glance_client.images.get(image_id) - image_resource_definition = { - 'chunks_num': chunks_num, - 'image_metadata': { - 'checksum': image_info.checksum, - 'disk_format': image_info.disk_format, - "container_format": image_info.container_format - } - } - return image_resource_definition - except Exception as err: - LOG.exception('Protecting temporary image (id: %s) to bank ' - 'failed.', image_id) - raise exception.CreateResourceFailed( - name="Volume Glance Backup", - reason=err, - resource_id=image_id, - resource_type=constants.IMAGE_RESOURCE_TYPE) - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - volume_id = resource.id - bank_section = checkpoint.get_resource_bank_section(volume_id) - cinder_client = ClientFactory.create_client('cinder', context) - glance_client = ClientFactory.create_client('glance', context) - LOG.info('creating volume backup by glance, volume_id: %s', volume_id) - bank_section.update_object('status', - constants.RESOURCE_STATUS_PROTECTING) - resource_metadata = { - 'volume_id': volume_id, - } - is_success = utils.status_poll( - partial(get_volume_status, cinder_client, volume_id), - interval=self._interval, - success_statuses=VOLUME_SUCCESS_STATUSES, - failure_statuses=VOLUME_FAILURE_STATUSES, - ignore_statuses=VOLUME_IGNORE_STATUSES, - ) - if not is_success: - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Volume Glance Backup", - reason='Volume is in erroneous state', - resource_id=volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE, - ) - - volume_info = cinder_client.volumes.get(volume_id) - resource_metadata['volume_size'] = volume_info.size - snapshot_id = None - temporary_volume = None - temporary_image_id = None - - try: - snapshot_id = self._create_snapshot(cinder_client, volume_id) - temporary_volume = self._create_temporary_volume( - cinder_client, snapshot_id) - temporary_image_id = self._create_temporary_image( - cinder_client, glance_client, temporary_volume) - image_resource_metadata = \ - self._backup_temporary_image(glance_client, temporary_image_id, - bank_section) - metadata = dict(resource_metadata, **image_resource_metadata) - bank_section.update_object('metadata', metadata) - bank_section.update_object('status', - constants.RESOURCE_STATUS_AVAILABLE) - LOG.info('Backed up volume ' - '(volume_id: %(volume_id)s ' - 'snapshot_id: %(snapshot_id)s ' - 'temporary_volume_id: %(temporary_volume_id)s) ' - 'temporary_image_id: %(temporary_image_id)s ' - 'successfully', { - 'volume_id': volume_id, - 'snapshot_id': snapshot_id, - 'temporary_volume_id': temporary_volume.id, - 'temporary_image_id': temporary_image_id - }) - finally: - if snapshot_id: - try: - cinder_client.volume_snapshots.delete(snapshot_id) - except Exception as e: - LOG.warning('Failed deleting snapshot: %(snapshot_id)s. 
' - 'Reason: %(reason)s', - {'snapshot_id': self.snapshot_id, 'reason': e}) - - if temporary_volume: - try: - cinder_client.volumes.delete(temporary_volume.id) - except Exception as e: - LOG.warning('Failed deleting temporary volume: ' - '%(temporary_volume_id)s. ' - 'Reason: %(reason)s', { - 'temporary_volume_id': temporary_volume.id, - 'reason': e - }) - if temporary_image_id: - try: - glance_client.images.delete(temporary_image_id) - except Exception as e: - LOG.warning('Failed deleting temporary image: ' - '%(temporary_image_id)s. ' - 'Reason: %(reason)s', { - 'temporary_image_id': temporary_image_id, - 'reason': e}) - - -class RestoreOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(RestoreOperation, self).__init__() - self._interval = poll_interval - - def _create_volume_from_image(self, cinder_client, temporary_image, - restore_name, original_vol_id, volume_size, - description): - volume = cinder_client.volumes.create( - size=volume_size, - imageRef=temporary_image.id, - name=restore_name, - description=description - ) - is_success = utils.status_poll( - partial(get_volume_status, cinder_client, volume.id), - interval=self._interval, - success_statuses=VOLUME_SUCCESS_STATUSES, - failure_statuses=VOLUME_FAILURE_STATUSES, - ignore_statuses=VOLUME_IGNORE_STATUSES - ) - if not is_success: - LOG.error("Restore volume glance backup failed, so delete " - "the temporary volume: volume_id: %s.", original_vol_id) - cinder_client.volumes.delete(volume.id) - raise exception.CreateResourceFailed( - name="Volume Glance Backup", - reason='Restored Volume is in erroneous state', - resource_id=volume.id, - resource_type=constants.VOLUME_RESOURCE_TYPE, - ) - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_volume_id = resource.id - restore_name = parameters.get('restore_name', - '%s@%s' % (checkpoint.id, - original_volume_id)) - restore_description = parameters.get('restore_description', None) - bank_section = checkpoint.get_resource_bank_section(original_volume_id) - cinder_client = ClientFactory.create_client('cinder', context) - glance_client = ClientFactory.create_client('glance', context) - resource_metadata = bank_section.get_object('metadata') - volume_size = int(resource_metadata['volume_size']) - temporary_image = None - try: - temporary_image = self._create_temporary_image( - bank_section, glance_client, original_volume_id - ) - self._create_volume_from_image(cinder_client, temporary_image, - restore_name, original_volume_id, - volume_size, restore_description) - finally: - if temporary_image: - try: - glance_client.images.delete(temporary_image.id) - except Exception as e: - LOG.warning('Failed deleting temporary image: ' - '%(temporary_image_id)s. 
' - 'Reason: %(reason)s', { - 'temporary_image_id': temporary_image.id, - 'reason': e - }) - LOG.info("Finish restoring volume backup, volume_id: %s.", - original_volume_id) - - def _create_temporary_image(self, bank_section, glance_client, - original_volume_id): - image_info = None - try: - image_info = utils.restore_image_from_bank( - glance_client, bank_section, - 'temporary_image_of_{0}'.format(original_volume_id)) - - if image_info.status != "active": - is_success = utils.status_poll( - partial(get_image_status, glance_client, image_info.id), - interval=self._interval, success_statuses={'active'}, - ignore_statuses={'queued', 'saving'}, - failure_statuses={'killed', 'deleted', 'pending_delete', - 'deactivated', 'not-found'} - ) - if is_success is not True: - LOG.error('The status of image is invalid. status:%s', - image_info.status) - raise exception.RestoreResourceFailed( - name="Volume Glance Backup", - reason="Create temporary image failed", - resource_id=original_volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE) - return image_info - except Exception as e: - LOG.error("Create temporary image of volume failed, " - "volume_id: %s.", original_volume_id) - LOG.exception(e) - if image_info is not None and hasattr(image_info, 'id'): - LOG.info("Delete the failed image, image_id: %s.", - image_info.id) - glance_client.images.delete(image_info.id) - raise exception.RestoreResourceFailed( - name="Volume Glance Backup", - reason=e, resource_id=original_volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE) - - -class VerifyOperation(protection_plugin.Operation): - def __init__(self): - super(VerifyOperation, self).__init__() - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_volume_id = resource.id - bank_section = checkpoint.get_resource_bank_section( - original_volume_id) - LOG.info('Verifying the volume backup, volume id: %s', - original_volume_id) - - update_method = partial( - utils.update_resource_verify_result, - kwargs.get('verify'), resource.type, original_volume_id) - - backup_status = bank_section.get_object("status") - - if backup_status == constants.RESOURCE_STATUS_AVAILABLE: - update_method(constants.RESOURCE_STATUS_AVAILABLE) - else: - reason = ('The status of volume backup status is %s.' 
- % backup_status) - update_method(backup_status, reason) - raise exception.VerifyResourceFailed( - name="Volume backup", - reason=reason, - resource_id=original_volume_id, - resource_type=resource.type) - - -class DeleteOperation(protection_plugin.Operation): - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - volume_id = resource.id - bank_section = checkpoint.get_resource_bank_section(volume_id) - - LOG.info("Deleting volume backup, volume_id: %s.", volume_id) - try: - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETING) - objects = bank_section.list_objects() - for obj in objects: - if obj == "status": - continue - bank_section.delete_object(obj) - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETED) - except Exception as err: - LOG.error("delete volume backup failed, volume_id: %s.", volume_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.DeleteResourceFailed( - name="Volume Glance Backup", - reason=err, - resource_id=volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE) - - -class VolumeGlanceProtectionPlugin(protection_plugin.ProtectionPlugin): - _SUPPORT_RESOURCE_TYPES = [constants.VOLUME_RESOURCE_TYPE] - - def __init__(self, config=None): - super(VolumeGlanceProtectionPlugin, self).__init__(config) - self._config.register_opts(volume_glance_opts, - 'volume_glance_plugin') - self._plugin_config = self._config.volume_glance_plugin - self._poll_interval = self._plugin_config.poll_interval - self._backup_from_snapshot = self._plugin_config.backup_from_snapshot - self._image_object_size = self._plugin_config.backup_image_object_size - - @classmethod - def get_supported_resources_types(cls): - return cls._SUPPORT_RESOURCE_TYPES - - @classmethod - def get_options_schema(cls, resources_type): - return volume_schemas.OPTIONS_SCHEMA - - @classmethod - def get_restore_schema(cls, resources_type): - return volume_schemas.RESTORE_SCHEMA - - @classmethod - def get_saved_info_schema(cls, resources_type): - return volume_schemas.SAVED_INFO_SCHEMA - - @classmethod - def get_verify_schema(cls, resource_type): - return volume_schemas.VERIFY_SCHEMA - - @classmethod - def get_saved_info(cls, metadata_store, resource): - pass - - def get_protect_operation(self, resource): - return ProtectOperation(self._poll_interval, - self._backup_from_snapshot, - self._image_object_size) - - def get_restore_operation(self, resource): - return RestoreOperation(self._poll_interval) - - def get_delete_operation(self, resource): - return DeleteOperation() - - def get_verify_operation(self, resource): - return VerifyOperation() diff --git a/karbor/services/protection/protection_plugins/volume/volume_glance_plugin_schemas.py b/karbor/services/protection/protection_plugins/volume/volume_glance_plugin_schemas.py deleted file mode 100644 index da448f1d..00000000 --- a/karbor/services/protection/protection_plugins/volume/volume_glance_plugin_schemas.py +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -OPTIONS_SCHEMA = { - "title": "Volume Glance Protection Options", - "type": "object", - "properties": { - "backup_name": { - "type": "string", - "title": "Backup Name", - "description": "The name of the backup.", - "default": None - }, - "description": { - "type": "string", - "title": "Description", - "description": "The description of the backup." - } - }, - "required": ["backup_name"] -} - -RESTORE_SCHEMA = { - "title": "Volume Glance Protection Restore", - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "title": "Restore Resource Name", - "description": "The name of the restore resource ", - "default": None - }, - }, - "required": ["restore_name"] -} - -VERIFY_SCHEMA = { - "title": "Volume Glance Protection Verify", - "type": "object", - "properties": {} -} - -SAVED_INFO_SCHEMA = { - "title": "Volume Glance Protection Saved Info", - "type": "object", - "properties": {}, - "required": [] -} diff --git a/karbor/services/protection/protection_plugins/volume/volume_plugin_cinder_schemas.py b/karbor/services/protection/protection_plugins/volume/volume_plugin_cinder_schemas.py deleted file mode 100644 index c6bd48d1..00000000 --- a/karbor/services/protection/protection_plugins/volume/volume_plugin_cinder_schemas.py +++ /dev/null @@ -1,136 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
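The OPTIONS_SCHEMA and RESTORE_SCHEMA dicts above are plain JSON Schema documents, so operator-supplied protect parameters can be checked with the standard jsonschema library. A minimal sketch of that validation step, using a trimmed copy of the Volume Glance options schema shown above (the sample options dict and the direct jsonschema.validate call are illustrative, not how Karbor's API layer is actually wired):

    import jsonschema

    # Trimmed copy of the "Volume Glance Protection Options" schema above.
    OPTIONS_SCHEMA = {
        "title": "Volume Glance Protection Options",
        "type": "object",
        "properties": {
            "backup_name": {"type": "string"},
            "description": {"type": "string"},
        },
        "required": ["backup_name"],
    }

    # Passes: backup_name is present and a string.
    jsonschema.validate({"backup_name": "nightly-vol-backup"}, OPTIONS_SCHEMA)

    # Raises jsonschema.exceptions.ValidationError: backup_name is required.
    jsonschema.validate({"description": "no name given"}, OPTIONS_SCHEMA)
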
- -OPTIONS_SCHEMA = { - "title": "Cinder Protection Options", - "type": "object", - "properties": { - "backup_name": { - "type": "string", - "title": "Backup Name", - "description": "The name of the backup.", - "default": None - }, - "backup_mode": { - "type": "string", - "title": "Backup Mode", - "description": "The backup mode.", - "enum": ["full", "incremental"], - "default": "full" - }, - "container": { - "type": "string", - "title": "Container", - "description": "The container which been chosen.", - "default": None - }, - "description": { - "type": "string", - "title": "Description", - "description": "The description of the volume.", - "default": None - }, - "force": { - "type": "boolean", - "title": "Force", - "description": "Whether to backup, even if the volume " - "is attached", - "default": False - } - }, - "required": ["backup_name", "backup_mode", "container", "force"] -} - -RESTORE_SCHEMA = { - "title": "Cinder Protection Restore", - "type": "object", - "properties": { - "volume_id": { - "type": "string", - "title": "Volume ID", - "description": "The target volume ID to restore to.", - "default": None - }, - "restore_name": { - "type": "string", - "title": "Restore Name", - "description": "The name of the restored volume.", - "default": None - }, - "restore_description": { - "type": "string", - "title": "Restore Description", - "description": "The description of the restored volume.", - "default": None - } - } -} - -VERIFY_SCHEMA = { - "title": "Cinder Volume Verify", - "type": "object", - "properties": {} -} - -SAVED_INFO_SCHEMA = { - "title": "Cinder Protection Saved Info", - "type": "object", - "properties": { - "name": { - "type": "string", - "title": "Name", - "description": "The name for this backup." - }, - "is_incremental": { - "type": "boolean", - "title": "Is Incremental", - "description": - "The type of the backup, " - "True is incremental and False is full." - }, - "status": { - "type": "string", - "title": "Status", - "description": "The backup status, such as available.", - "enum": ['creating', 'available', - 'deleting', 'error', - 'restoring', 'error_restoring'], - }, - "progress": { - "type": "number", - "title": "Progress", - "description": - "The current operation progress for this backup.", - "constraint": {'min': 0, 'max': 1}, - }, - "fail_reason": { - "type": "string", - "title": "Fail Reason", - "description": - "The reason for the failure status of the backup." - }, - "size": { - "type": "integer", - "title": "Size", - "description": "The size of the backup, in GB." - }, - "volume_id": { - "type": "string", - "title": "Volume ID", - "description": - ("The ID of the volume " - "from which the backup was created.") - }, - }, - "required": ["name", "status", "progress", "fail_reason", - "size", "volume_id"] -} diff --git a/karbor/services/protection/protection_plugins/volume/volume_snapshot_plugin.py b/karbor/services/protection/protection_plugins/volume/volume_snapshot_plugin.py deleted file mode 100644 index 12f7dbf8..00000000 --- a/karbor/services/protection/protection_plugins/volume/volume_snapshot_plugin.py +++ /dev/null @@ -1,356 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from functools import partial -import six - -from cinderclient import exceptions as cinder_exc -from oslo_config import cfg -from oslo_log import log as logging - -from karbor.common import constants -from karbor import exception -from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection import protection_plugin -from karbor.services.protection.protection_plugins import utils -from karbor.services.protection.protection_plugins.volume \ - import volume_snapshot_plugin_schemas as volume_schemas - -LOG = logging.getLogger(__name__) - -volume_snapshot_opts = [ - cfg.IntOpt( - 'poll_interval', default=15, - help='Poll interval for Cinder volume status.' - ) -] - -VOLUME_FAILURE_STATUSES = {'error', 'error_deleting', 'deleting', - 'not-found'} - -VOLUME_IGNORE_STATUSES = {'attaching', 'creating', 'backing-up', - 'restoring-backup'} - - -def get_snapshot_status(cinder_client, snapshot_id): - return get_resource_status(cinder_client.volume_snapshots, snapshot_id, - 'snapshot') - - -def get_volume_status(cinder_client, volume_id): - return get_resource_status(cinder_client.volumes, volume_id, 'volume') - - -def get_resource_status(resource_manager, resource_id, resource_type): - LOG.debug('Polling %(resource_type)s (id: %(resource_id)s)', - {'resource_type': resource_type, 'resource_id': resource_id}) - try: - resource = resource_manager.get(resource_id) - status = resource.status - except cinder_exc.NotFound: - status = 'not-found' - LOG.debug( - 'Polled %(resource_type)s (id: %(resource_id)s) status: %(status)s', - {'resource_type': resource_type, 'resource_id': resource_id, - 'status': status} - ) - return status - - -class ProtectOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(ProtectOperation, self).__init__() - self._interval = poll_interval - - def _create_snapshot(self, cinder_client, volume_id, snapshot_name, - description, force): - snapshot = cinder_client.volume_snapshots.create( - volume_id=volume_id, - name=snapshot_name, - description=description, - force=force - ) - - snapshot_id = snapshot.id - is_success = utils.status_poll( - partial(get_snapshot_status, cinder_client, snapshot_id), - interval=self._interval, - success_statuses={'available'}, - failure_statuses={'error', 'error_deleting', 'deleting', - 'not-found'}, - ignore_statuses={'creating'}, - ignore_unexpected=True - ) - - if not is_success: - try: - snapshot = cinder_client.volume_snapshots.get(snapshot_id) - except Exception: - reason = 'Unable to find volume snapshot.' 
- else: - reason = 'The status of snapshot is %s' % snapshot.status - raise exception.CreateResourceFailed( - name="Volume Snapshot", - reason=reason, resource_id=volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE) - - return snapshot_id - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - volume_id = resource.id - bank_section = checkpoint.get_resource_bank_section(volume_id) - cinder_client = ClientFactory.create_client('cinder', context) - LOG.info('Creating volume snapshot, volume_id: %s', volume_id) - bank_section.update_object('status', - constants.RESOURCE_STATUS_PROTECTING) - volume_info = cinder_client.volumes.get(volume_id) - is_success = utils.status_poll( - partial(get_volume_status, cinder_client, volume_id), - interval=self._interval, - success_statuses={'available', 'in-use', 'error_extending', - 'error_restoring'}, - failure_statuses=VOLUME_FAILURE_STATUSES, - ignore_statuses=VOLUME_IGNORE_STATUSES, - ) - if not is_success: - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Volume Snapshot", - reason='Volume is in a error status.', - resource_id=volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE, - ) - resource_metadata = { - 'volume_id': volume_id, - 'size': volume_info.size - } - snapshot_name = parameters.get('snapshot_name', None) - description = parameters.get('description', None) - force = parameters.get('force', False) - try: - snapshot_id = self._create_snapshot(cinder_client, volume_id, - snapshot_name, - description, force) - except Exception as e: - LOG.error('Error creating snapshot (volume_id: %(volume_id)s ' - ': %(reason)s', {'volume_id': volume_id, 'reason': e}) - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateResourceFailed( - name="Volume Snapshot", - reason=e, resource_id=volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE, - ) - - resource_metadata['snapshot_id'] = snapshot_id - bank_section.update_object('metadata', resource_metadata) - bank_section.update_object('status', - constants.RESOURCE_STATUS_AVAILABLE) - LOG.info('Snapshot volume (volume_id: %(volume_id)s snapshot_id: ' - '%(snapshot_id)s ) successfully', - {'volume_id': volume_id, 'snapshot_id': snapshot_id}) - - -class RestoreOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(RestoreOperation, self).__init__() - self._interval = poll_interval - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_volume_id = resource.id - bank_section = checkpoint.get_resource_bank_section(original_volume_id) - cinder_client = ClientFactory.create_client('cinder', context) - resource_metadata = bank_section.get_object('metadata') - restore_name = parameters.get('restore_name', - 'volume-%s@%s' % (checkpoint.id, - original_volume_id)) - restore_description = parameters.get('restore_description', None) - snapshot_id = resource_metadata['snapshot_id'] - size = resource_metadata['size'] - restore = kwargs.get('restore') - LOG.info("Restoring a volume from snapshot, " - "original_volume_id: %s", original_volume_id) - try: - volume = cinder_client.volumes.create( - size, snapshot_id=snapshot_id, - name=restore_name, - description=restore_description) - is_success = utils.status_poll( - partial(get_volume_status, cinder_client, volume.id), - interval=self._interval, - success_statuses={'available', 'in-use', 'error_extending', - 'error_restoring'}, - 
failure_statuses=VOLUME_FAILURE_STATUSES, - ignore_statuses=VOLUME_IGNORE_STATUSES, - ) - volume = cinder_client.volumes.get(volume.id) - if is_success is not True: - LOG.error('The status of volume is invalid. status:%s', - volume.status) - reason = 'Invalid status: %s' % volume.status - restore.update_resource_status( - constants.VOLUME_RESOURCE_TYPE, - volume.id, volume.status, reason) - restore.save() - raise exception.RestoreResourceFailed( - name="Volume Snapshot", - resource_id=original_volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE) - restore.update_resource_status(constants.VOLUME_RESOURCE_TYPE, - volume.id, volume.status) - restore.save() - except Exception as e: - LOG.error("Restore volume from snapshot failed, volume_id: %s", - original_volume_id) - raise exception.RestoreResourceFailed( - name="Volume Snapshot", - reason=e, resource_id=original_volume_id, - resource_type=constants.VOLUME_RESOURCE_TYPE) - LOG.info("Finish restoring a volume from snapshot, volume_id: %s", - original_volume_id) - - -class VerifyOperation(protection_plugin.Operation): - def __init__(self): - super(VerifyOperation, self).__init__() - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - original_volume_id = resource.id - bank_section = checkpoint.get_resource_bank_section(original_volume_id) - cinder_client = ClientFactory.create_client('cinder', context) - resource_metadata = bank_section.get_object('metadata') - LOG.info('Verifying the volume snapshot, volume_id: %s', - original_volume_id) - - update_method = partial( - utils.update_resource_verify_result, - kwargs.get('verify'), resource.type, original_volume_id) - - snapshot_id = resource_metadata['snapshot_id'] - try: - volume_snapshot = cinder_client.volume_snapshots.get(snapshot_id) - snapshot_status = volume_snapshot.status - except Exception as ex: - LOG.error('Getting volume snapshot (snapshot_id: %(snapshot_id)s):' - '%(reason)s fails', - {'snapshot_id': snapshot_id, 'reason': ex}) - reason = 'Getting volume backup fails.' - update_method(constants.RESOURCE_STATUS_ERROR, reason) - raise - - if snapshot_status == 'available': - update_method(constants.RESOURCE_STATUS_AVAILABLE) - else: - reason = ('The status of volume snapshot status is %s.' - % snapshot_status) - update_method(snapshot_status, reason) - raise exception.VerifyResourceFailed( - name="Volume snapshot", - reason=reason, - resource_id=original_volume_id, - resource_type=resource.type - ) - - -class DeleteOperation(protection_plugin.Operation): - def __init__(self, poll_interval): - super(DeleteOperation, self).__init__() - self._interval = poll_interval - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - resource_id = resource.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - snapshot_id = None - try: - bank_section.update_object('status', - constants.RESOURCE_STATUS_DELETING) - resource_metadata = bank_section.get_object('metadata') - snapshot_id = resource_metadata['snapshot_id'] - cinder_client = ClientFactory.create_client('cinder', context) - try: - snapshot = cinder_client.volume_snapshots.get(snapshot_id) - cinder_client.volume_snapshots.delete(snapshot) - except cinder_exc.NotFound: - LOG.info('Snapshot id: %s not found. 
Assuming deleted', - snapshot_id) - is_success = utils.status_poll( - partial(get_snapshot_status, cinder_client, snapshot_id), - interval=self._interval, - success_statuses={'deleted', 'not-found'}, - failure_statuses={'error', 'error_deleting'}, - ignore_statuses={'deleting'}, - ignore_unexpected=True - ) - if not is_success: - raise exception.NotFound() - bank_section.delete_object('metadata') - bank_section.update_object('status', - constants.RESOURCE_STATUS_DELETED) - except Exception as e: - LOG.error('Delete volume snapshot failed, snapshot_id: %s', - snapshot_id) - bank_section.update_object('status', - constants.RESOURCE_STATUS_ERROR) - raise exception.DeleteResourceFailed( - name="Volume Snapshot", - reason=six.text_type(e), - resource_id=resource_id, - resource_type=constants.VOLUME_RESOURCE_TYPE - ) - - -class VolumeSnapshotProtectionPlugin(protection_plugin.ProtectionPlugin): - _SUPPORT_RESOURCE_TYPES = [constants.VOLUME_RESOURCE_TYPE] - - def __init__(self, config=None): - super(VolumeSnapshotProtectionPlugin, self).__init__(config) - self._config.register_opts(volume_snapshot_opts, - 'volume_snapshot_plugin') - self._plugin_config = self._config.volume_snapshot_plugin - self._poll_interval = self._plugin_config.poll_interval - - @classmethod - def get_supported_resources_types(cls): - return cls._SUPPORT_RESOURCE_TYPES - - @classmethod - def get_options_schema(cls, resources_type): - return volume_schemas.OPTIONS_SCHEMA - - @classmethod - def get_restore_schema(cls, resources_type): - return volume_schemas.RESTORE_SCHEMA - - @classmethod - def get_verify_schema(cls, resources_type): - return volume_schemas.VERIFY_SCHEMA - - @classmethod - def get_saved_info_schema(cls, resources_type): - return volume_schemas.SAVED_INFO_SCHEMA - - @classmethod - def get_saved_info(cls, metadata_store, resource): - pass - - def get_protect_operation(self, resource): - return ProtectOperation(self._poll_interval) - - def get_restore_operation(self, resource): - return RestoreOperation(self._poll_interval) - - def get_verify_operation(self, resource): - return VerifyOperation() - - def get_delete_operation(self, resource): - return DeleteOperation(self._poll_interval) diff --git a/karbor/services/protection/protection_plugins/volume/volume_snapshot_plugin_schemas.py b/karbor/services/protection/protection_plugins/volume/volume_snapshot_plugin_schemas.py deleted file mode 100644 index 2b4116c4..00000000 --- a/karbor/services/protection/protection_plugins/volume/volume_snapshot_plugin_schemas.py +++ /dev/null @@ -1,70 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -OPTIONS_SCHEMA = { - "title": "Volume Snapshot Options", - "type": "object", - "properties": { - "snapshot_name": { - "type": "string", - "title": "Snapshot Name", - "description": "The name of the snapshot.", - "default": None - }, - "description": { - "type": "string", - "title": "Description", - "description": "The description of the volume." 
- }, - "force": { - "type": "boolean", - "title": "Force", - "description": "Allows or disallows snapshot of a volume when the " - "volume is attached to an instance.", - "default": False - } - }, - "required": ["snapshot_name", "force"] -} - -RESTORE_SCHEMA = { - "title": "Volume snapshot Restore", - "type": "object", - "properties": { - "restore_name": { - "type": "string", - "title": "Restore Volume Name", - "description": "The name of the restore volume", - "default": None - }, - "restore_description": { - "type": "string", - "title": "Restore Description", - "description": "The description of the restored volume.", - "default": None - } - }, - "required": ["restore_name"] -} - -VERIFY_SCHEMA = { - "title": "Volume snapshot Verify", - "type": "object", - "properties": {} -} - -SAVED_INFO_SCHEMA = { - "title": "Volume Protection Saved Info", - "type": "object", - "properties": {}, - "required": [] -} diff --git a/karbor/services/protection/provider.py b/karbor/services/protection/provider.py deleted file mode 100755 index 0ed0109f..00000000 --- a/karbor/services/protection/provider.py +++ /dev/null @@ -1,233 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from karbor import exception -from karbor.i18n import _ -from karbor.services.protection import bank_plugin -from karbor.services.protection.checkpoint import CheckpointCollection -from karbor import utils -from oslo_config import cfg -from oslo_log import log as logging - -provider_opts = [ - cfg.MultiStrOpt('plugin', - default='', - help='plugins to use for protection'), - cfg.StrOpt('bank', - default='', - help='bank plugin to use for storage'), - cfg.StrOpt('description', - default='', - help='the description of provider'), - cfg.StrOpt('name', - default='', - help='the name of provider'), - cfg.StrOpt('id', - default='', - help='the provider id'), - cfg.BoolOpt('enabled', - default=False, - help='enabled or not'), -] -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - -PROTECTION_NAMESPACE = 'karbor.protections' - -CONF.register_opt(cfg.StrOpt('provider_config_dir', - default='providers.d', - help='Configuration directory for providers.' 
- ' Absolute path, or relative to karbor ' - ' configuration directory.')) - - -class PluggableProtectionProvider(object): - def __init__(self, provider_config): - super(PluggableProtectionProvider, self).__init__() - self._config = provider_config - self._id = self._config.provider.id - self._name = self._config.provider.name - self._description = self._config.provider.description - self._extended_info_schema = {'options_schema': {}, - 'restore_schema': {}, - 'saved_info_schema': {}} - self.checkpoint_collection = None - self._bank_plugin = None - self._plugin_map = {} - - if (hasattr(self._config.provider, 'bank') and - not self._config.provider.bank): - raise ImportError(_("Empty bank")) - - self._load_bank(self._config.provider.bank) - self._bank = bank_plugin.Bank(self._bank_plugin) - self.checkpoint_collection = CheckpointCollection( - self._bank) - - if hasattr(self._config.provider, 'plugin'): - for plugin_name in self._config.provider.plugin: - if not plugin_name: - raise ImportError(_("Empty protection plugin")) - self._register_plugin(plugin_name) - - @property - def id(self): - return self._id - - @property - def name(self): - return self._name - - @property - def description(self): - return self._description - - @property - def extended_info_schema(self): - return self._extended_info_schema - - @property - def bank(self): - return self._bank - - @property - def plugins(self): - return self._plugin_map - - def load_plugins(self): - return { - plugin_type: plugin_class(self._config) - for plugin_type, plugin_class in self.plugins.items() - } - - def _load_bank(self, bank_name): - try: - plugin = utils.load_plugin(PROTECTION_NAMESPACE, bank_name, - self._config) - except Exception: - LOG.exception("Load bank plugin: '%s' failed.", bank_name) - raise - else: - self._bank_plugin = plugin - - def _register_plugin(self, plugin_name): - try: - plugin = utils.load_class(PROTECTION_NAMESPACE, plugin_name) - except Exception: - LOG.exception("Load protection plugin: '%s' failed.", plugin_name) - raise - else: - for resource in plugin.get_supported_resources_types(): - self._plugin_map[resource] = plugin - if hasattr(plugin, 'get_options_schema'): - self._extended_info_schema['options_schema'][resource] \ - = plugin.get_options_schema(resource) - if hasattr(plugin, 'get_restore_schema'): - self._extended_info_schema['restore_schema'][resource] \ - = plugin.get_restore_schema(resource) - if hasattr(plugin, 'get_saved_info_schema'): - self._extended_info_schema['saved_info_schema'][resource] \ - = plugin.get_saved_info_schema(resource) - - def get_checkpoint_collection(self): - return self.checkpoint_collection - - def get_checkpoint(self, checkpoint_id, context=None): - return self.get_checkpoint_collection().get(checkpoint_id, - context=context) - - def list_checkpoints(self, project_id, provider_id, limit=None, - marker=None, plan_id=None, start_date=None, - end_date=None, sort_dir=None, context=None, - all_tenants=False): - checkpoint_collection = self.get_checkpoint_collection() - return checkpoint_collection.list_ids( - project_id=project_id, provider_id=provider_id, limit=limit, - marker=marker, plan_id=plan_id, start_date=start_date, - end_date=end_date, sort_dir=sort_dir, context=context, - all_tenants=all_tenants) - - -class ProviderRegistry(object): - def __init__(self): - super(ProviderRegistry, self).__init__() - self.providers = {} - self._load_providers() - - def _load_providers(self): - """load provider""" - config_dir = utils.find_config(CONF.provider_config_dir) - - for 
config_file in os.listdir(config_dir): - if not config_file.endswith('.conf'): - continue - config_path = os.path.abspath(os.path.join(config_dir, - config_file)) - provider_config = cfg.ConfigOpts() - provider_config(args=['--config-file=' + config_path]) - provider_config.register_opts(provider_opts, 'provider') - - provider_enabled = provider_config.provider.enabled - if not provider_enabled: - LOG.info('Provider {0} is not enabled'.format( - provider_config.provider.name) - ) - continue - - try: - provider = PluggableProtectionProvider(provider_config) - except Exception as e: - LOG.error("Load provider: %(provider)s failed. " - "Reason: %(reason)s", - {'provider': provider_config.provider.name, - 'reason': e}) - else: - LOG.info('Loaded provider: %s successfully.', - provider_config.provider.name) - self.providers[provider.id] = provider - - def list_providers(self, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None): - # TODO(jiaopengju) How to use sort_keys, sort_dirs and filters - provider_ids = sorted(self.providers.keys()) - provider_list = sorted( - self.providers.values(), key=lambda item: item.id) - if marker is not None and marker in provider_ids: - provider_list = provider_list[provider_ids.index(marker) + 1:] - filters = filters if filters else {} - valid_providers = [] - for provider in provider_list: - provider_dict = dict( - id=provider.id, - name=provider.name, - description=provider.description, - extended_info_schema=provider.extended_info_schema - ) - for key, value in filters.items(): - if key in provider_dict.keys() and \ - value != provider_dict[key]: - break - else: - valid_providers.append(provider_dict) - - if limit is not None and len(valid_providers) == limit: - return valid_providers - return valid_providers - - def show_provider(self, provider_id): - try: - return self.providers[provider_id] - except KeyError: - raise exception.ProviderNotFound(provider_id=provider_id) diff --git a/karbor/services/protection/resource_flow.py b/karbor/services/protection/resource_flow.py deleted file mode 100644 index e6166773..00000000 --- a/karbor/services/protection/resource_flow.py +++ /dev/null @@ -1,191 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
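ProviderRegistry._load_providers above gives each providers.d/*.conf file its own isolated oslo.config namespace instead of polluting the global CONF. A minimal sketch of that load pattern, assuming a provider file exists at the illustrative path below (option names mirror provider_opts; values are read lazily, so registering the opts after parsing the file works):

    from oslo_config import cfg

    provider_opts = [
        cfg.MultiStrOpt('plugin', default=''),
        cfg.StrOpt('bank', default=''),
        cfg.StrOpt('name', default=''),
        cfg.StrOpt('id', default=''),
        cfg.BoolOpt('enabled', default=False),
    ]

    # A private ConfigOpts keeps each provider's options isolated.
    conf = cfg.ConfigOpts()
    conf(args=['--config-file=/etc/karbor/providers.d/example.conf'])
    conf.register_opts(provider_opts, 'provider')

    if conf.provider.enabled:
        print('provider %s uses bank %s and plugins %s'
              % (conf.provider.name, conf.provider.bank,
                 conf.provider.plugin))
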
-from collections import namedtuple - -from karbor.common import constants -from karbor import exception -from karbor.services.protection import graph -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - - -HOOKS = ( - HOOK_PRE_BEGIN, - HOOK_PRE_FINISH, - HOOK_MAIN, - HOOK_COMPLETE -) = ( - 'on_prepare_begin', - 'on_prepare_finish', - 'on_main', - 'on_complete' -) - -ResourceHooks = namedtuple('ResourceHooks', [ - HOOK_PRE_BEGIN, - HOOK_PRE_FINISH, - HOOK_MAIN, - HOOK_COMPLETE, -]) - - -OPERATION_EXTRA_ARGS = { - constants.OPERATION_RESTORE: ['restore', 'new_resources'], - constants.OPERATION_VERIFY: ['verify', 'new_resources'], - constants.OPERATION_COPY: ['checkpoint', 'checkpoint_copy'], -} - - -def noop_handle(*args, **kwargs): - pass - - -class ResourceFlowGraphWalkerListener(graph.GraphWalkerListener): - def __init__(self, resource_flow, operation_type, context, parameters, - plugins, workflow_engine): - super(ResourceFlowGraphWalkerListener, self).__init__() - self.operation_type = operation_type - self.context = context - self.parameters = parameters or {} - self.plugins = plugins - self.workflow_engine = workflow_engine - self.flow = resource_flow - - self.node_tasks = {} - self.task_stack = [] - self.current_resource = None - - def _create_hook_tasks(self, operation_obj, resource): - pre_begin_task = self._create_hook_task(operation_obj, resource, - HOOK_PRE_BEGIN) - pre_finish_task = self._create_hook_task(operation_obj, resource, - HOOK_PRE_FINISH) - main_task = self._create_hook_task(operation_obj, resource, - HOOK_MAIN) - post_task = self._create_hook_task(operation_obj, resource, - HOOK_COMPLETE) - - return ResourceHooks(pre_begin_task, pre_finish_task, main_task, - post_task) - - def _create_hook_task(self, operation_obj, resource, hook_type): - method = getattr(operation_obj, hook_type, noop_handle) - assert callable(method), ( - 'Resource {} method "{}" is not callable' - ).format(resource.type, hook_type) - - task_name = "{operation_type}_{hook_type}_{type}_{id}".format( - type=resource.type, - id=resource.id, - hook_type=hook_type, - operation_type=self.operation_type, - ) - - parameters = {} - parameters.update(self.parameters.get(resource.type, {})) - resource_id = '{}#{}'.format(resource.type, resource.id) - parameters.update(self.parameters.get(resource_id, {})) - injects = { - 'context': self.context, - 'parameters': parameters, - 'resource': resource, - } - if self.operation_type == constants.OPERATION_COPY: - injects['checkpoint'] = self.parameters.get( - 'checkpoint') - injects['checkpoint_copy'] = self.parameters.get( - 'checkpoint_copy') - injects['operation_log'] = self.parameters.get( - 'operation_log') - - requires = OPERATION_EXTRA_ARGS.get(self.operation_type, []) - requires.append('operation_log') - task = self.workflow_engine.create_task(method, - name=task_name, - inject=injects, - requires=requires) - return task - - def on_node_enter(self, node, already_visited): - resource = node.value - LOG.debug( - "Enter node (type: %(type)s id: %(id)s visited: %(visited)s)", - {"type": resource.type, "id": resource.id, "visited": - already_visited} - ) - self.current_resource = resource - if already_visited: - self.task_stack.append(self.node_tasks[resource.id]) - return - - if resource.type not in self.plugins: - raise exception.ProtectionPluginNotFound(type=resource.type) - - protection_plugin = self.plugins[resource.type] - operation_getter_name = 'get_{}_operation'.format(self.operation_type) - operation_getter = 
getattr(protection_plugin, operation_getter_name) - assert callable(operation_getter) - operation_obj = operation_getter(resource) - hooks = self._create_hook_tasks(operation_obj, resource) - LOG.debug("added operation %s hooks", self.operation_type) - self.node_tasks[resource.id] = hooks - self.task_stack.append(hooks) - self.workflow_engine.add_tasks(self.flow, hooks.on_prepare_begin, - hooks.on_prepare_finish, hooks.on_main, - hooks.on_complete) - self.workflow_engine.link_task(self.flow, hooks.on_prepare_begin, - hooks.on_prepare_finish) - self.workflow_engine.link_task(self.flow, hooks.on_prepare_finish, - hooks.on_main) - self.workflow_engine.link_task(self.flow, hooks.on_main, - hooks.on_complete) - - def on_node_exit(self, node): - resource = node.value - LOG.debug( - "Exit node (type: %(type)s id: %(id)s)", - {"type": resource.type, "id": resource.id} - ) - child_hooks = self.task_stack.pop() - if len(self.task_stack) > 0: - parent_hooks = self.task_stack[-1] - self.workflow_engine.link_task(self.flow, - parent_hooks.on_prepare_begin, - child_hooks.on_prepare_begin) - self.workflow_engine.link_task(self.flow, - child_hooks.on_prepare_finish, - parent_hooks.on_prepare_finish) - self.workflow_engine.link_task(self.flow, child_hooks.on_complete, - parent_hooks.on_complete) - - -def build_resource_flow(operation_type, context, workflow_engine, - plugins, resource_graph, parameters): - LOG.info("Build resource flow for operation %s", operation_type) - - resource_graph_flow = workflow_engine.build_flow( - 'ResourceGraphFlow_{}'.format(operation_type), - 'graph', - ) - resource_walker = ResourceFlowGraphWalkerListener(resource_graph_flow, - operation_type, - context, - parameters, - plugins, - workflow_engine) - walker = graph.GraphWalker() - walker.register_listener(resource_walker) - LOG.debug("Starting resource graph walk (operation %s)", operation_type) - walker.walk_graph(resource_graph) - LOG.debug("Finished resource graph walk (operation %s)", operation_type) - return resource_graph_flow diff --git a/karbor/services/protection/rpcapi.py b/karbor/services/protection/rpcapi.py deleted file mode 100644 index ce734c91..00000000 --- a/karbor/services/protection/rpcapi.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright 2012, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Client side of the protection manager RPC API. -""" - -from oslo_config import cfg -import oslo_messaging as messaging - -from karbor.objects import base as objects_base -from karbor import rpc - - -CONF = cfg.CONF - - -class ProtectionAPI(object): - """Client side of the protection rpc API. - - API version history: - - 1.0 - Initial version. 
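The resource-flow walker above turns each resource's four hooks into taskflow tasks and chains them in a fixed order before linking parent and child resources. A stripped-down sketch of that per-resource wiring using taskflow directly (the Hook task class and task names are illustrative stand-ins for the workflow_engine's create_task/link_task wrappers):

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import graph_flow

    class Hook(task.Task):
        def execute(self):
            # Real hooks call the protection plugin; this one just logs.
            print('running', self.name)

    flow = graph_flow.Flow('ResourceGraphFlow_protect')
    hooks = [Hook(name='protect_%s_OS::Cinder::Volume_vol-1' % hook)
             for hook in ('on_prepare_begin', 'on_prepare_finish',
                          'on_main', 'on_complete')]
    flow.add(*hooks)

    # Same ordering the listener enforces:
    # prepare_begin -> prepare_finish -> main -> complete.
    for earlier, later in zip(hooks, hooks[1:]):
        flow.link(earlier, later)

    engines.run(flow)
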
- """ - - RPC_API_VERSION = '1.0' - - def __init__(self): - super(ProtectionAPI, self).__init__() - target = messaging.Target(topic=CONF.protection_topic, - version=self.RPC_API_VERSION) - serializer = objects_base.KarborObjectSerializer() - self.client = rpc.get_client(target, version_cap=None, - serializer=serializer) - - def restore(self, ctxt, restore=None, restore_auth=None): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'restore', - restore=restore, - restore_auth=restore_auth) - - def verification(self, ctxt, verification=None): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'verification', - verification=verification) - - def protect(self, ctxt, plan=None, checkpoint_properties=None): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'protect', - plan=plan, - checkpoint_properties=checkpoint_properties) - - def copy(self, ctxt, plan=None): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'copy', - plan=plan) - - def delete(self, ctxt, provider_id, checkpoint_id): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'delete', - provider_id=provider_id, - checkpoint_id=checkpoint_id) - - def reset_state(self, ctxt, provider_id, checkpoint_id, state): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'reset_state', - provider_id=provider_id, - checkpoint_id=checkpoint_id, - state=state) - - def show_checkpoint(self, ctxt, provider_id, checkpoint_id): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'show_checkpoint', - provider_id=provider_id, - checkpoint_id=checkpoint_id) - - def list_checkpoints(self, ctxt, provider_id, marker=None, - limit=None, sort_keys=None, - sort_dirs=None, filters=None, all_tenants=False): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'list_checkpoints', - provider_id=provider_id, - marker=marker, - limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - all_tenants=all_tenants - ) - - def list_protectable_types(self, ctxt): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'list_protectable_types') - - def show_protectable_type(self, ctxt, protectable_type=None): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'show_protectable_type', - protectable_type=protectable_type) - - def list_protectable_instances( - self, ctxt, protectable_type=None, - marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, parameters=None): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'list_protectable_instances', - protectable_type=protectable_type, - marker=marker, - limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - parameters=parameters) - - def list_protectable_dependents(self, - ctxt, protectable_id=None, - protectable_type=None, - protectable_name=None): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'list_protectable_dependents', - protectable_id=protectable_id, - protectable_type=protectable_type, - protectable_name=protectable_name) - - def show_protectable_instance(self, - ctxt, protectable_type=None, - protectable_id=None, - parameters=None): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'show_protectable_instance', - protectable_type=protectable_type, - protectable_id=protectable_id, - parameters=parameters) - - def show_provider(self, - ctxt, 
provider_id=None): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'show_provider', - provider_id=provider_id) - - def list_providers(self, ctxt, marker=None, limit=None, - sort_keys=None, - sort_dirs=None, filters=None): - cctxt = self.client.prepare(version='1.0') - return cctxt.call( - ctxt, - 'list_providers', - marker=marker, - limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters) diff --git a/karbor/tests/__init__.py b/karbor/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/base.py b/karbor/tests/base.py deleted file mode 100644 index 2363ccab..00000000 --- a/karbor/tests/base.py +++ /dev/null @@ -1,118 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -from unittest import mock - -import fixtures -from oslo_config import cfg -from oslo_messaging import conffixture as messaging_conffixture -from oslo_utils import timeutils -from oslotest import base - -from karbor.common import config # noqa Need to register global_opts -from karbor.db import migration -from karbor.db.sqlalchemy import api as sqla_api -from karbor import rpc -from karbor.tests.unit import conf_fixture - - -CONF = cfg.CONF - -_DB_CACHE = None - - -class Database(fixtures.Fixture): - - def __init__(self, db_api, db_migrate, sql_connection): - super(Database, self).__init__() - self.sql_connection = sql_connection - - # Suppress logging for test runs - migrate_logger = logging.getLogger('migrate') - migrate_logger.setLevel(logging.WARNING) - - self.engine = db_api.get_engine() - self.engine.dispose() - conn = self.engine.connect() - db_migrate.db_sync() - - self._DB = "".join(line for line in conn.connection.iterdump()) - self.engine.dispose() - - def setUp(self): - super(Database, self).setUp() - - conn = self.engine.connect() - conn.connection.executescript(self._DB) - self.addCleanup(self.engine.dispose) - - -class TestCase(base.BaseTestCase): - - """Test case base class for all unit tests.""" - - def setUp(self): - """Run before each test method to initialize test environment.""" - super(TestCase, self).setUp() - - rpc.add_extra_exmods("karbor.tests.unit") - self.addCleanup(rpc.clear_extra_exmods) - self.addCleanup(rpc.cleanup) - - self.messaging_conf = messaging_conffixture.ConfFixture(CONF) - self.messaging_conf.transport_url = 'fake:/' - self.messaging_conf.response_timeout = 15 - self.useFixture(self.messaging_conf) - - rpc.init(CONF) - - conf_fixture.set_defaults(CONF) - CONF([], default_config_files=[]) - - # NOTE(vish): We need a better method for creating fixtures for tests - # now that we have some required db setup for the system - # to work properly. 
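The Database fixture above pays the migration cost only once: it dumps the freshly migrated SQLite schema to a SQL script with iterdump() and replays that script for every test. The same caching trick in isolation, using only the standard library (the plans table is an illustrative stand-in for the migrated schema):

    import sqlite3

    # One-time setup: build the schema, then snapshot it as a SQL script.
    seed = sqlite3.connect(':memory:')
    seed.execute('CREATE TABLE plans (id TEXT PRIMARY KEY, name TEXT)')
    schema_script = '\n'.join(seed.iterdump())
    seed.close()

    # Per test: replaying the script is far cheaper than re-migrating.
    def fresh_db():
        conn = sqlite3.connect(':memory:')
        conn.executescript(schema_script)
        return conn

    db = fresh_db()
    db.execute("INSERT INTO plans VALUES ('1', 'demo')")
    assert db.execute('SELECT name FROM plans').fetchone() == ('demo',)
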
- self.start = timeutils.utcnow() - - CONF.set_default('connection', 'sqlite://', 'database') - CONF.set_default('sqlite_synchronous', False, 'database') - - global _DB_CACHE - if not _DB_CACHE: - _DB_CACHE = Database(sqla_api, migration, - sql_connection=CONF.database.connection) - self.useFixture(_DB_CACHE) - - def override_config(self, name, override, group=None): - """Cleanly override CONF variables.""" - CONF.set_override(name, override, group) - self.addCleanup(CONF.clear_override, name, group) - - def flags(self, **kw): - """Override CONF variables for a test.""" - for k, v in kw.items(): - self.override_config(k, v) - - def mock_object(self, obj, attr_name, new_attr=None, **kwargs): - """Use python mock to mock an object attribute - - Mocks the specified objects attribute with the given value. - Automatically performs 'addCleanup' for the mock. - - """ - if not new_attr: - new_attr = mock.Mock() - patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs) - patcher.start() - self.addCleanup(patcher.stop) - return new_attr diff --git a/karbor/tests/contrib/gate_hook.sh b/karbor/tests/contrib/gate_hook.sh deleted file mode 100644 index 5f37f831..00000000 --- a/karbor/tests/contrib/gate_hook.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -VENV=${1:-"fullstack"} - -GATE_DEST=$BASE/new -DEVSTACK_PATH=$GATE_DEST/devstack - -$BASE/new/devstack-gate/devstack-vm-gate.sh \ No newline at end of file diff --git a/karbor/tests/contrib/post_test_hook.sh b/karbor/tests/contrib/post_test_hook.sh deleted file mode 100644 index 3de780ba..00000000 --- a/karbor/tests/contrib/post_test_hook.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash - -set -xe - -KARBOR_DIR="$BASE/new/karbor" -TEMPEST_DIR="$BASE/new/tempest" -SCRIPTS_DIR="/usr/os-testr-env/bin/" - -venv=${1:-"fullstack"} - -function generate_test_logs { - local path="$1" - # Compress all $path/*.txt files and move the directories holding those - # files to /opt/stack/logs. Files with .log suffix have their - # suffix changed to .txt (so browsers will know to open the compressed - # files and not download them). - if [[ -d "$path" ]] ; then - sudo find "$path" -iname "*.log" -type f -exec mv {} {}.txt \; -exec gzip -9 {}.txt \; - sudo mv "$path/*" /opt/stack/logs/ - fi -} - -function generate_testr_results { - # Give job user rights to access tox logs - sudo -H -u "$owner" chmod o+rw . - sudo -H -u "$owner" chmod o+rw -R .stestr - if [[ -f ".stestr/0" ]] ; then - ".tox/$venv/bin/subunit-1to2" < .stestr/0 > ./stestr.subunit - $SCRIPTS_DIR/subunit2html ./stestr.subunit testr_results.html - gzip -9 ./stestr.subunit - gzip -9 ./testr_results.html - sudo mv ./*.gz /opt/stack/logs/ - fi - - if [[ "$venv" == fullstack* ]] ; then - generate_test_logs "/tmp/${venv}-logs" - fi -} - -owner=stack -sudo_env= - -# Set owner permissions according to job's requirements. -cd "$KARBOR_DIR" -sudo chown -R $owner:stack "$KARBOR_DIR" - -# Run tests -echo "Running karbor $venv tests" -set +e -sudo -n -H -u "$owner" tox -e "$venv" -testr_exit_code=$? 
-set -e
-
-# Collect and parse results
-generate_testr_results
-exit $testr_exit_code
diff --git a/karbor/tests/fullstack/__init__.py b/karbor/tests/fullstack/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/karbor/tests/fullstack/karbor_base.py b/karbor/tests/fullstack/karbor_base.py
deleted file mode 100644
index 587950a7..00000000
--- a/karbor/tests/fullstack/karbor_base.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from cinderclient import client as cinder_client
-from glanceclient import client as glance_client
-from karborclient import client as karbor_client
-from keystoneauth1 import identity
-from keystoneauth1 import session
-from manilaclient import client as manilaclient
-from neutronclient.v2_0 import client as neutron_client
-from novaclient import client as nova_client
-
-import functools
-import os_client_config
-
-from oslotest import base
-
-
-def _get_cloud_config(cloud='devstack'):
-    return os_client_config.OpenStackConfig().get_one_cloud(cloud=cloud)
-
-
-def _credentials(cloud='devstack'):
-    """Retrieves credentials to run functional tests
-
-    Credentials are either read via os-client-config from the environment
-    or from a config file ('clouds.yaml'). Environment variables override
-    those from the config file.
-
-    devstack produces a clouds.yaml with two named clouds - one named
-    'devstack' which has user privs and one named 'devstack-admin' which
-    has admin privs. Despite that, this function defaults to the
-    user-privileged 'devstack' cloud (matching the default in its
-    signature); pass cloud='devstack-admin' explicitly when admin
-    credentials are required.
- """ - return _get_cloud_config(cloud=cloud).get_auth_args() - - -def _get_endpoint(service_type): - cloud_config = _get_cloud_config() - keystone_session = cloud_config.get_session_client(service_type) - keystone_auth = cloud_config.get_auth() - region_name = cloud_config.get_region_name() - return keystone_auth.get_endpoint( - keystone_session, - service_type=service_type, - region_name=region_name, - ) - - -def get_client_from_cloud_config(client): - creds = _credentials() - return get_client( - client, - auth_url=creds['auth_url'] + '/v3', - username=creds['username'], - password=creds['password'], - project_name=creds['project_name'], - project_domain_id=creds['project_domain_id'], - user_domain_id=creds['user_domain_id'], - ) - - -def get_client(client, **kwargs): - auth = identity.Password(**kwargs) - sess = session.Session(auth=auth) - return client(session=sess) - - -def _get_karbor_client(api_version='1'): - return get_client_from_cloud_config( - functools.partial( - karbor_client.Client, api_version, service_type='data-protect')) - - -def _get_cinder_client(api_version='3'): - return get_client_from_cloud_config( - functools.partial( - cinder_client.Client, api_version, service_type='volumev3')) - - -def _get_manila_client(api_version='2'): - return get_client_from_cloud_config( - functools.partial( - manilaclient.Client, api_version, service_type='sharev2')) - - -def _get_glance_client(api_version='2'): - return get_client_from_cloud_config( - functools.partial( - glance_client.Client, api_version, service_type='image')) - - -def _get_nova_client(api_version='2'): - return get_client_from_cloud_config( - functools.partial( - nova_client.Client, api_version, service_type='compute')) - - -def _get_neutron_client(): - return get_client_from_cloud_config( - functools.partial(neutron_client.Client, service_type='network')) - - -class ObjectStore(object): - """Stores objects for later closing. - - ObjectStore can be used to aggregate objects and close them. - - Example: - - with closing(ObjectStore()) as obj_store: - obj = obj_store.store(SomeObject()) - - or: - - obj_store = ObjectStore() - obj_store.store(SomeObject()) - obj_store.close() - """ - - def __init__(self): - super(ObjectStore, self).__init__() - self._close_funcs = [] - - def store(self, obj, close_func=None): - self._close_funcs.append(close_func if close_func else obj.close) - return obj - - def close(self): - for close_func in reversed(self._close_funcs): - close_func() - - -class KarborBaseTest(base.BaseTestCase): - """Basic class for karbor fullstack testing. - - This class has common code shared for karbor fullstack testing - including the various clients (karbor) and common - setup/cleanup code. 
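# ---------------------------------------------------------------------
# NOTE(editor): illustrative sketch, not part of the deleted file. It
# shows why ObjectStore (defined above) must release resources in
# *reverse* registration order: later test objects usually depend on
# earlier ones (a checkpoint on its plan, a plan on its volume).
# FakeResource is a stand-in; only the teardown ordering matters here.
from contextlib import closing


class FakeResource(object):
    def __init__(self, name, log):
        self.name = name
        self._log = log

    def close(self):
        self._log.append(self.name)


teardown_log = []
# Uses ObjectStore exactly as defined in karbor_base.py above.
with closing(ObjectStore()) as obj_store:
    obj_store.store(FakeResource('volume', teardown_log))
    obj_store.store(FakeResource('plan', teardown_log))
    obj_store.store(FakeResource('checkpoint', teardown_log))
assert teardown_log == ['checkpoint', 'plan', 'volume']
# ---------------------------------------------------------------------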
- """ - - def setUp(self): - super(KarborBaseTest, self).setUp() - self.cinder_client = _get_cinder_client() - self.manila_client = _get_manila_client() - self.glance_client = _get_glance_client() - self.nova_client = _get_nova_client() - self.neutron_client = _get_neutron_client() - self.karbor_client = _get_karbor_client() - self.keystone_endpoint = _get_endpoint('identity') - self._testcase_store = ObjectStore() - self.provider_id_noop = 'b766f37c-d011-4026-8228-28730d734a3f' - self.provider_id_os = 'cf56bd3e-97a7-4078-b6d5-f36246333fd9' - self.provider_id_fs_bank = '6659007d-6f66-4a0f-9cb4-17d6aded0bb9' - self.provider_id_os_volume_snapshot = ( - '90d5bfea-a259-41e6-80c6-dcfcfcd9d827') - - def store(self, obj, close_func=None): - return self._testcase_store.store(obj, close_func) - - def tearDown(self): - self._testcase_store.close() - super(KarborBaseTest, self).tearDown() - - def provider_list(self): - return self.karbor_client.providers.list() diff --git a/karbor/tests/fullstack/karbor_objects.py b/karbor/tests/fullstack/karbor_objects.py deleted file mode 100644 index 2b2f8f6e..00000000 --- a/karbor/tests/fullstack/karbor_objects.py +++ /dev/null @@ -1,472 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from functools import partial - -from oslo_utils import uuidutils - -from karbor.common import constants -from karbor.tests.fullstack import karbor_base as base -from karbor.tests.fullstack import utils - -SHORT_TIMEOUT = 150 -MEDIUM_TIMEOUT = 450 -LONG_TIMEOUT = 900 - -SHORT_SLEEP = 3 -MEDIUM_SLEEP = 15 -LONG_SLEEP = 30 -HUGE_SLEEP = 100 - -DEFAULT_FLAVOR = "cirros256" -DEFAULT_NETWORK = "private" - - -class Checkpoint(object): - def __init__(self): - super(Checkpoint, self).__init__() - self.id = None - self._provider_id = None - self.karbor_client = base._get_karbor_client() - - def _checkpoint_status(self, status=None): - try: - cp = self.karbor_client.checkpoints.get(self._provider_id, self.id) - except Exception: - return False - - if status is None or cp.status == status: - return True - else: - return False - - def create(self, provider_id, plan_id, timeout=LONG_TIMEOUT): - self._provider_id = provider_id - checkpoint = self.karbor_client.checkpoints.create(provider_id, - plan_id) - self.id = checkpoint.id - utils.wait_until_true(partial(self._checkpoint_status, - constants.CHECKPOINT_STATUS_AVAILABLE), - timeout=timeout, sleep=HUGE_SLEEP) - return self.id - - def close(self, timeout=MEDIUM_TIMEOUT): - try: - self.karbor_client.checkpoints.delete(self._provider_id, self.id) - except Exception: - return - utils.wait_until_true(partial(self._checkpoint_status, - constants.CHECKPOINT_STATUS_DELETED), - timeout=timeout, sleep=LONG_SLEEP) - - -class Plan(object): - _name_id = 0 - - def __init__(self): - super(Plan, self).__init__() - self.id = None - self.karbor_client = base._get_karbor_client() - - def create(self, provider_id, resources, - parameters={}, name=None): - def _transform_resource(resource): - if isinstance(resource, dict): - return resource - if hasattr(resource, 
'to_dict') and callable(resource.to_dict): - return resource.to_dict() - - if name is None: - name = "KarborFullstack-Plan-{id}".format( - id=self.__class__._name_id - ) - self.__class__._name_id += 1 - - resources = map(_transform_resource, resources) - plan = self.karbor_client.plans.create(name, provider_id, resources, - parameters) - self.id = plan.id - return self.id - - def update(self, data): - return self.karbor_client.plans.update(self.id, data) - - def close(self): - try: - self.karbor_client.plans.delete(self.id) - except Exception: - return - - -class Restore(object): - def __init__(self): - super(Restore, self).__init__() - self.id = None - self.karbor_client = base._get_karbor_client() - - def _restore_status(self, status=None): - try: - restore = self.karbor_client.restores.get(self.id) - except Exception: - return False - - if status is None or restore.status == status: - return True - else: - return False - - def create(self, provider_id, checkpoint_id, target, parameters, - restore_auth, timeout=LONG_TIMEOUT): - restore = self.karbor_client.restores.create(provider_id, - checkpoint_id, - target, - parameters, - restore_auth) - self.id = restore.id - utils.wait_until_true(partial(self._restore_status, 'success'), - timeout=timeout, sleep=HUGE_SLEEP) - return self.id - - def close(self): - pass - - -class Trigger(object): - _name_id = 0 - - def __init__(self): - super(Trigger, self).__init__() - self.id = None - self.karbor_client = base._get_karbor_client() - - def create(self, type, properties, name=None): - if name is None: - name = "KarborFullstack-Trigger-{id}".format( - id=self.__class__._name_id - ) - self.__class__._name_id += 1 - - trigger = self.karbor_client.triggers.create(name, type, properties) - self.id = trigger.id - return self.id - - def close(self): - try: - self.karbor_client.triggers.delete(self.id) - except Exception: - return - - -class ScheduledOperation(object): - _name_id = 0 - - def __init__(self): - super(ScheduledOperation, self).__init__() - self.id = None - self.karbor_client = base._get_karbor_client() - - def create(self, operation_type, trigger_id, - operation_definition, name=None): - if name is None: - name = "KarborFullstack-Scheduled-Operation-{id}".format( - id=self.__class__._name_id - ) - self.__class__._name_id += 1 - - scheduled_operation = self.karbor_client.scheduled_operations.create( - name, - operation_type, - trigger_id, - operation_definition - ) - self.id = scheduled_operation.id - return self.id - - def close(self): - try: - self.karbor_client.scheduled_operations.delete(self.id) - except Exception: - return - - -class Server(object): - _name_id = 0 - - def __init__(self): - super(Server, self).__init__() - self.id = None - self._name = None - self.nova_client = base._get_nova_client() - self.neutron_client = base._get_neutron_client() - self.cinder_client = base._get_cinder_client() - self.glance_client = base._get_glance_client() - - def _server_status(self, status=None): - try: - server = self.nova_client.servers.get(self.id) - except Exception: - return False - - if status is None or status == server.status: - return True - else: - return False - - def to_dict(self): - return { - "id": self.id, - "type": constants.SERVER_RESOURCE_TYPE, - "name": self._name, - } - - def create(self, name=None, image=None, volume=None, flavor=DEFAULT_FLAVOR, - network=DEFAULT_NETWORK, timeout=LONG_TIMEOUT): - block_device_mapping_v2 = None - if volume: - block_device_mapping_v2 = [{ - 'uuid': volume, - 'source_type': 'volume', - 
'destination_type': 'volume', - 'boot_index': 0, - 'delete_on_termination': False}] - else: - if not image: - images = self.glance_client.images.list() - for image_iter in images: - if image_iter['disk_format'] not in ('aki', 'ari') and ( - image_iter['name'].startswith('cirros')): - image = image_iter['id'] - break - assert image - flavor = self.nova_client.flavors.find(name=flavor) - if name is None: - name = "KarborFullstack-Server-{id}".format( - id=self.__class__._name_id - ) - self.__class__._name_id += 1 - self._name = name - - networks = self.neutron_client.list_networks(name=network) - assert len(networks['networks']) > 0 - network_id = networks['networks'][0]['id'] - - server = self.nova_client.servers.create( - name=name, - image=image, - block_device_mapping_v2=block_device_mapping_v2, - flavor=flavor, - nics=[{"net-id": network_id}], - ) - self.id = server.id - - utils.wait_until_true(partial(self._server_status, 'ACTIVE'), - timeout=timeout, sleep=MEDIUM_SLEEP) - return self.id - - def _volume_attached(self, volume_id): - volume_item = self.cinder_client.volumes.get(volume_id) - server_attachments = list(filter(lambda x: x['server_id'] == self.id, - volume_item.attachments)) - if len(server_attachments) > 0: - return True - else: - return False - - def attach_volume(self, volume_id, timeout=MEDIUM_TIMEOUT): - self.nova_client.volumes.create_server_volume(self.id, volume_id) - utils.wait_until_true(partial(self._volume_attached, volume_id), - timeout=timeout, sleep=SHORT_SLEEP) - - def _volume_detached(self, volume_id): - volume_item = self.cinder_client.volumes.get(volume_id) - server_attachments = list(filter(lambda x: x['server_id'] == self.id, - volume_item.attachments)) - if len(server_attachments) > 0: - return False - else: - return True - - def detach_volume(self, volume_id, timeout=MEDIUM_TIMEOUT): - self.nova_client.volumes.delete_server_volume(self.id, volume_id) - utils.wait_until_true(partial(self._volume_detached, volume_id), - timeout=timeout, sleep=SHORT_SLEEP) - - def close(self, timeout=MEDIUM_TIMEOUT): - try: - self.nova_client.servers.delete(self.id) - except Exception: - return - utils.wait_until_none(self._server_status, timeout=timeout, - sleep=MEDIUM_SLEEP) - - -class Volume(object): - _name_id = 0 - - def __init__(self): - super(Volume, self).__init__() - self.id = None - self._name = None - self.cinder_client = base._get_cinder_client() - self.glance_client = base._get_glance_client() - - def _volume_status(self, status=None): - try: - volume = self.cinder_client.volumes.get(self.id) - except Exception: - return False - - if status is None or status == volume.status: - return True - else: - return False - - def to_dict(self): - return { - "id": self.id, - "type": constants.VOLUME_RESOURCE_TYPE, - "name": self._name, - "extra_info": {'availability_zone': 'az1'}, - } - - def create(self, size, name=None, create_from_image=False, - timeout=LONG_TIMEOUT): - if name is None: - name = "KarborFullstack-Volume-{id}".format( - id=self.__class__._name_id - ) - self.__class__._name_id += 1 - - self._name = name - image = None - if create_from_image: - images = self.glance_client.images.list() - for image_iter in images: - if image_iter['disk_format'] not in ('aki', 'ari') and ( - image_iter['name'].startswith('cirros')): - image = image_iter['id'] - break - assert image - volume = self.cinder_client.volumes.create(size, name=name, - imageRef=image) - self.id = volume.id - utils.wait_until_true(partial(self._volume_status, 'available'), - timeout=timeout, 
sleep=MEDIUM_SLEEP) - return self.id - - def close(self, timeout=LONG_TIMEOUT): - try: - self.cinder_client.volumes.delete(self.id) - except Exception: - return - utils.wait_until_none(self._volume_status, timeout=timeout, - sleep=MEDIUM_SLEEP) - - -class Share(object): - _name_id = 0 - - def __init__(self): - super(Share, self).__init__() - self.id = None - self._name = None - self.manila_client = base._get_manila_client() - self.neutron_client = base._get_neutron_client() - - def _share_status(self, status=None): - try: - share = self.manila_client.shares.get(self.id) - except Exception: - return False - - if status is None or status == share.status: - return True - else: - return False - - def to_dict(self): - return { - "id": self.id, - "type": constants.SHARE_RESOURCE_TYPE, - "name": self._name, - } - - def create(self, share_proto, size, name=None, timeout=LONG_TIMEOUT): - if name is None: - name = "KarborFullstack-Share-{id}".format(id=self._name_id) - self._name_id += 1 - - self._name = name - share = self.manila_client.shares.create(share_proto, size, name=name) - self.id = share.id - utils.wait_until_true(partial(self._share_status, 'available'), - timeout=timeout, sleep=MEDIUM_SLEEP) - return self.id - - def close(self, timeout=MEDIUM_TIMEOUT): - try: - self.manila_client.shares.delete(self.id) - except Exception: - return - utils.wait_until_none(self._share_status, timeout=timeout, - sleep=MEDIUM_SLEEP) - - -class Network(object): - def __init__(self): - super(Network, self).__init__() - self.id = None - self.project_id = None - self._name = "private-net-%s" % uuidutils.generate_uuid() - self.neutron_client = base._get_neutron_client() - - def _network_status(self, status=None): - try: - networks = self.neutron_client.list_networks(name=self._name) - assert len(networks['networks']) > 0 - network = networks['networks'][0] - except Exception: - return False - - if status is None or status == network['status']: - return True - else: - return False - - def to_dict(self): - return { - "id": self.id, - "type": constants.NETWORK_RESOURCE_TYPE, - "name": self._name, - } - - def create(self, timeout=MEDIUM_TIMEOUT): - network = {'name': self._name, 'admin_state_up': True} - self.neutron_client.create_network({'network': network}) - - networks = self.neutron_client.list_networks(name=self._name) - assert len(networks['networks']) > 0 - network_id = networks['networks'][0]['id'] - self.id = network_id - self.project_id = networks['networks'][0]['tenant_id'] - - utils.wait_until_true(partial(self._network_status, 'ACTIVE'), - timeout=timeout, sleep=MEDIUM_SLEEP) - - return self.id - - def close(self, timeout=LONG_TIMEOUT): - try: - self.neutron_client.delete_network(self.id) - except Exception: - return - utils.wait_until_none(self._network_status, timeout=timeout, - sleep=MEDIUM_SLEEP) diff --git a/karbor/tests/fullstack/test_checkpoints.py b/karbor/tests/fullstack/test_checkpoints.py deleted file mode 100644 index 60aa22dc..00000000 --- a/karbor/tests/fullstack/test_checkpoints.py +++ /dev/null @@ -1,237 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from karbor.common import constants -from karbor.tests.fullstack import karbor_base -from karbor.tests.fullstack import karbor_objects as objects - - -class CheckpointsTest(karbor_base.KarborBaseTest): - """Test Checkpoints operation """ - def setUp(self): - super(CheckpointsTest, self).setUp() - self.provider_id = self.provider_id_os - - def test_checkpoint_create(self): - volume = self.store(objects.Volume()) - volume.create(1) - plan = self.store(objects.Plan()) - volume_parameter_key = "OS::Cinder::Volume#{id}".format(id=volume.id) - backup_name = "volume-backup-{id}".format(id=volume.id) - parameters = { - "OS::Cinder::Volume": { - "backup_mode": "full", - "force": False - }, - volume_parameter_key: { - "backup_name": backup_name - } - } - plan.create(self.provider_id_os, [volume, ], - parameters=parameters) - - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id, plan.id, timeout=2400) - - search_opts = {"volume_id": volume.id} - backups = self.cinder_client.backups.list(search_opts=search_opts) - self.assertEqual(1, len(backups)) - - search_opts = {"name": backup_name} - backups = self.cinder_client.backups.list(search_opts=search_opts) - self.assertEqual(1, len(backups)) - - def test_checkpoint_delete(self): - volume = self.store(objects.Volume()) - volume.create(1) - plan = self.store(objects.Plan()) - plan.create(self.provider_id, [volume, ]) - - checkpoint = objects.Checkpoint() - checkpoint.create(self.provider_id, plan.id, timeout=2400) - checkpoint_item = self.karbor_client.checkpoints.get(self.provider_id, - checkpoint.id) - self.assertEqual(constants.CHECKPOINT_STATUS_AVAILABLE, - checkpoint_item.status) - - checkpoint.close() - items = self.karbor_client.checkpoints.list(self.provider_id) - ids = [item.id for item in items] - self.assertTrue(checkpoint.id not in ids) - - def test_checkpoint_list(self): - volume = self.store(objects.Volume()) - volume.create(1) - plan = self.store(objects.Plan()) - plan.create(self.provider_id_noop, [volume, ]) - - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id_noop, plan.id, timeout=2400) - - items = self.karbor_client.checkpoints.list(self.provider_id_noop) - ids = [item.id for item in items] - self.assertTrue(checkpoint.id in ids) - - def test_checkpoint_get(self): - volume = self.store(objects.Volume()) - volume.create(1) - plan = self.store(objects.Plan()) - plan.create(self.provider_id, [volume, ]) - - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id, plan.id, timeout=2400) - - # sanity - checkpoint_item = self.karbor_client.checkpoints.get(self.provider_id, - checkpoint.id) - self.assertEqual(constants.CHECKPOINT_STATUS_AVAILABLE, - checkpoint_item.status) - self.assertEqual(checkpoint.id, checkpoint_item.id) - - def test_server_attached_volume_only_protect_server(self): - """Test checkpoint for server with attached volume - - Test checkpoint for server which has attached one volume, - but only add server in protect source - """ - volume = self.store(objects.Volume()) - volume.create(1) - server = self.store(objects.Server()) - server.create() - server.attach_volume(volume.id) - - plan = self.store(objects.Plan()) - plan.create(self.provider_id_noop, [server, ]) - - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id_noop, plan.id, timeout=2400) - - items = 
self.karbor_client.checkpoints.list(self.provider_id_noop) - ids = [item.id for item in items] - self.assertTrue(checkpoint.id in ids) - - def test_server_attached_volume_protect_both(self): - """Test checkpoint for server with attached volume - - Test checkpoint for server which has attached one volume, - and add server and volume both in protect source - """ - volume = self.store(objects.Volume()) - volume.create(1) - server = self.store(objects.Server()) - server.create() - server.attach_volume(volume.id) - - plan = self.store(objects.Plan()) - plan.create(self.provider_id_noop, [server, volume]) - - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id_noop, plan.id, timeout=2400) - - items = self.karbor_client.checkpoints.list(self.provider_id_noop) - ids = [item.id for item in items] - self.assertTrue(checkpoint.id in ids) - - def test_server_boot_from_volume_with_attached_volume(self): - """Test checkpoint for server with a bootable volume - - Test checkpoint for server which has booted form one bootable - volume. - """ - bootable_volume = self.store(objects.Volume()) - bootable_volume_id = bootable_volume.create(1, create_from_image=True) - volume = self.store(objects.Volume()) - volume.create(1) - server = self.store(objects.Server()) - server.create(volume=bootable_volume_id) - server.attach_volume(volume.id) - - plan = self.store(objects.Plan()) - plan.create(self.provider_id, [server, ]) - - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id, plan.id, timeout=2400) - - items = self.karbor_client.checkpoints.list(self.provider_id) - ids = [item.id for item in items] - self.assertTrue(checkpoint.id in ids) - search_opts = {"volume_id": volume.id} - backups = self.cinder_client.backups.list(search_opts=search_opts) - self.assertEqual(1, len(backups)) - search_opts = {"volume_id": bootable_volume_id} - bootable_backups = self.cinder_client.backups.list( - search_opts=search_opts) - self.assertEqual(1, len(bootable_backups)) - server.detach_volume(volume.id) - - def test_checkpoint_share_projection(self): - share = self.store(objects.Share()) - share.create("NFS", 1) - plan = self.store(objects.Plan()) - - share_parameter_key = "OS::Manila::Share#{id}".format( - id=share.id) - snapshot_name = "share-snapshot-{id}".format(id=share.id) - parameters = { - "OS::Manila::Share": { - "force": False - }, - share_parameter_key: { - "snapshot_name": snapshot_name - } - } - plan.create(self.provider_id_os, [share, ], - parameters=parameters) - - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id, plan.id, timeout=2400) - - search_opts = {"share_id": share.id} - snapshots = self.manila_client.share_snapshots.list( - search_opts=search_opts) - self.assertEqual(1, len(snapshots)) - - search_opts = {"name": snapshot_name} - backups = self.manila_client.share_snapshots.list( - search_opts=search_opts) - self.assertEqual(1, len(backups)) - - def test_checkpoint_volume_snapshot(self): - volume = self.store(objects.Volume()) - volume.create(1) - plan = self.store(objects.Plan()) - volume_parameter_key = "OS::Cinder::Volume#{id}".format(id=volume.id) - snapshot_name = "volume-snapshot-{id}".format(id=volume.id) - parameters = { - "OS::Cinder::Volume": { - "force": False - }, - volume_parameter_key: { - "snapshot_name": snapshot_name - } - } - plan.create(self.provider_id_os_volume_snapshot, [volume, ], - parameters=parameters) - - checkpoint = self.store(objects.Checkpoint()) - 
checkpoint.create(self.provider_id_os_volume_snapshot, plan.id, - timeout=2400) - - search_opts = {"volume_id": volume.id} - snapshots = self.cinder_client.volume_snapshots.list( - search_opts=search_opts) - self.assertEqual(1, len(snapshots)) - - search_opts = {"name": snapshot_name} - snapshots = self.cinder_client.volume_snapshots.list( - search_opts=search_opts) - self.assertEqual(1, len(snapshots)) diff --git a/karbor/tests/fullstack/test_plans.py b/karbor/tests/fullstack/test_plans.py deleted file mode 100644 index 3ff98f08..00000000 --- a/karbor/tests/fullstack/test_plans.py +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from karbor.common import constants -from karbor.tests.fullstack import karbor_base -from karbor.tests.fullstack import karbor_objects as objects - - -class PlansTest(karbor_base.KarborBaseTest): - """Test Plans operation""" - def setUp(self): - super(PlansTest, self).setUp() - self.provider_id = self.provider_id_noop - - def test_plans_list(self): - # create plan - volume = self.store(objects.Volume()) - volume.create(1) - plan1 = self.store(objects.Plan()) - plan1.create(self.provider_id, [volume, ]) - plan2 = self.store(objects.Plan()) - plan2.create(self.provider_id, [volume, ]) - - # list plans after creating - items = self.karbor_client.plans.list() - ids = [item.id for item in items] - self.assertTrue(plan1.id in ids) - self.assertTrue(plan2.id in ids) - - def test_plans_get(self): - plan_name = "Fullstack Test Get" - volume = self.store(objects.Volume()) - volume.create(1) - plan = self.store(objects.Plan()) - plan.create(self.provider_id, [volume, ], name=plan_name) - - plan_item = self.karbor_client.plans.get(plan.id) - self.assertEqual(plan_name, plan_item.name) - - def test_plans_update(self): - plan_initial_name = "Fullstack Plan Pre-Update" - plan_updated_name = "Fullstack Plan Post-Update" - volume1_name = "Fullstack Plan Update Volume1" - volume2_name = "Fullstack Plan Update Volume2" - volume1 = self.store(objects.Volume()) - volume1.create(1, name=volume1_name) - volume2 = self.store(objects.Volume()) - volume2.create(1, name=volume2_name) - plan = self.store(objects.Plan()) - plan.create(self.provider_id, [volume1, ], name=plan_initial_name) - - # sanity - plan_item = self.karbor_client.plans.get(plan.id) - self.assertEqual(plan_initial_name, plan_item.name) - self.assertEqual("suspended", plan_item.status) - self.assertEqual([{"id": volume1.id, - "type": constants.VOLUME_RESOURCE_TYPE, - "name": volume1_name, - "extra_info": - {"availability_zone": "az1"}}], - plan_item.resources) - - # update name - data = {"name": plan_updated_name} - plan_item = self.karbor_client.plans.update(plan.id, data) - self.assertEqual(plan_updated_name, plan_item.name) - - # update resources - data = {"resources": [volume2.to_dict(), ]} - plan_item = self.karbor_client.plans.update(plan.id, data) - self.assertEqual([{"id": volume2.id, - "type": constants.VOLUME_RESOURCE_TYPE, - "name": volume2_name, - "extra_info": - {"availability_zone": 
"az1"}}], - plan_item.resources) - - # update status - data = {"status": "started"} - plan_item = self.karbor_client.plans.update(plan.id, data) - self.assertEqual("started", plan_item.status) diff --git a/karbor/tests/fullstack/test_protectables.py b/karbor/tests/fullstack/test_protectables.py deleted file mode 100644 index 050e847c..00000000 --- a/karbor/tests/fullstack/test_protectables.py +++ /dev/null @@ -1,102 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from karbor.common.constants import RESOURCE_TYPES -from karbor.tests.fullstack import karbor_base -from karbor.tests.fullstack import karbor_objects as objects - - -class ProtectablesTest(karbor_base.KarborBaseTest): - """Test Protectables operation """ - - def test_protectables_list(self): - items = self.karbor_client.protectables.list() - query_types = [item.protectable_type for item in items] - self.assertItemsEqual(RESOURCE_TYPES, query_types) - - def test_protectables_get(self): - protectable_type = 'OS::Keystone::Project' - res = self.karbor_client.protectables.get(protectable_type) - self.assertEqual(protectable_type, res.name) - - protectable_type = 'OS::Nova::Server' - res = self.karbor_client.protectables.get(protectable_type) - self.assertEqual(protectable_type, res.name) - - protectable_type = 'OS::Neutron::Network' - res = self.karbor_client.protectables.get(protectable_type) - self.assertEqual(protectable_type, res.name) - - def test_protectables_list_instances(self): - volume = self.store(objects.Volume()) - volume.create(1) - items = self.karbor_client.protectables.list_instances( - 'OS::Cinder::Volume') - ids = [item.id for item in items] - self.assertTrue(volume.id in ids) - - server = self.store(objects.Server()) - server.create() - items = self.karbor_client.protectables.list_instances( - 'OS::Nova::Server') - ids = [item.id for item in items] - self.assertTrue(server.id in ids) - - network = self.store(objects.Network()) - network.create() - items = self.karbor_client.protectables.list_instances( - 'OS::Neutron::Network') - self.assertEqual(items[0].id, network.project_id) - - def test_protectables_get_instance(self): - volume = self.store(objects.Volume()) - volume.create(1) - instance = self.karbor_client.protectables.get_instance( - 'OS::Cinder::Volume', volume.id) - self.assertEqual(volume.id, instance.id) - - server = self.store(objects.Server()) - server.create() - instance = self.karbor_client.protectables.get_instance( - 'OS::Nova::Server', server.id) - self.assertEqual(server.id, instance.id) - - def test_protectables_get_attach_volume_instance(self): - server = self.store(objects.Server()) - server.create() - - volume = self.store(objects.Volume()) - volume.create(1) - - server.attach_volume(volume.id) - volume_item = self.cinder_client.volumes.get(volume.id) - ins_res = self.karbor_client.protectables.get_instance( - 'OS::Nova::Server', volume_item.attachments[0]["server_id"]) - self.assertTrue(ins_res.dependent_resources) - self.assertEqual('OS::Glance::Image', - 
ins_res.dependent_resources[0]["type"]) - self.assertEqual('OS::Cinder::Volume', - ins_res.dependent_resources[1]["type"]) - self.assertEqual(volume.id, - ins_res.dependent_resources[1]["id"]) - - def test_share_protectables_list_instances(self): - self.skipTest('Waiting new manilaclient being merged.') - res_list = self.karbor_client.protectables.list_instances( - 'OS::Manila::Share') - before_num = len(res_list) - share = self.store(objects.Share()) - share.create("NFS", 1) - res_list = self.karbor_client.protectables.list_instances( - 'OS::Manila::Share') - after_num = len(res_list) - self.assertEqual(1, after_num - before_num) diff --git a/karbor/tests/fullstack/test_providers.py b/karbor/tests/fullstack/test_providers.py deleted file mode 100644 index 4dc3f630..00000000 --- a/karbor/tests/fullstack/test_providers.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from karbor.tests.fullstack import karbor_base - - -class ProvidersTest(karbor_base.KarborBaseTest): - """Test Providers operation""" - - def test_providers_list(self): - provider_res = self.karbor_client.providers.list() - self.assertTrue(len(provider_res)) - - def test_provider_get(self): - providers = self.karbor_client.providers.list() - for provider in providers: - provider_res = self.karbor_client.providers.get(provider.id) - self.assertEqual(provider.name, provider_res.name) diff --git a/karbor/tests/fullstack/test_restores.py b/karbor/tests/fullstack/test_restores.py deleted file mode 100644 index 1f969502..00000000 --- a/karbor/tests/fullstack/test_restores.py +++ /dev/null @@ -1,207 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
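# ---------------------------------------------------------------------
# NOTE(editor): illustrative sketch, not part of the deleted file. The
# _store helper defined just below resolves keys like
# "volume_deletable_status" with eval(deletable_str, DELETABLE_STATUS),
# i.e. it evaluates the key name against DELETABLE_STATUS used as a
# globals mapping. A plain dictionary lookup is the equivalent, safer
# idiom (eval also mutates its globals argument by injecting a
# __builtins__ entry as a side effect):
DELETABLE_STATUS = {
    "volume_deletable_status": ["available", "error"],
    "server_deletable_status": ["available", "running"],
}

resource_type = "OS::Cinder::Volume"
types = resource_type.split("::")            # ['OS', 'Cinder', 'Volume']
deletable_str = "%s_deletable_status" % types[2].lower()

deletable_list = DELETABLE_STATUS.get(deletable_str, [])
assert deletable_list == ["available", "error"]
# ---------------------------------------------------------------------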
- -import re - -from oslo_utils import importutils - -from karbor.common import constants -from karbor.tests.fullstack import karbor_base -from karbor.tests.fullstack import karbor_objects as objects - -DELETABLE_STATUS = { - "volume_deletable_status": ["available", "error"], - "server_deletable_status": ["available", "running"] -} - - -class RestoresTest(karbor_base.KarborBaseTest): - """Test Restores operation """ - parameters = {} - restore_auth = { - "type": "password", - "username": "admin", - "password": "password", - } - - def _store(self, resources_status): - if not isinstance(resources_status, dict): - return - - for resource, status in resources_status.items(): - resource_type, resource_id = resource.split("#") - if resource_type is None: - continue - - types = resource_type.split("::") - if len(types) < 3: - continue - - try: - obj_class = importutils.import_class( - "karbor.tests.fullstack.karbor_objects.%s" % types[2]) - except Exception: - continue - - deletable_str = "%s_deletable_status" % types[2].lower() - deletable_list = eval(deletable_str, DELETABLE_STATUS) - if callable(obj_class) and status in deletable_list: - obj = obj_class() - obj.id = resource_id - obj.close() - - @staticmethod - def get_restore_target(endpoint): - regex = re.compile( - r'http[s]?://\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}' - r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b', re.IGNORECASE - ) - url = re.search(regex, endpoint).group() - restore_target = url + r"/identity/v3" - return restore_target - - def test_restore_create(self): - volume = self.store(objects.Volume()) - volume.create(1) - plan = self.store(objects.Plan()) - plan.create(self.provider_id_noop, [volume, ]) - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id_noop, plan.id) - - restore_target = self.get_restore_target(self.keystone_endpoint) - restore = self.store(objects.Restore()) - restore.create(self.provider_id_noop, checkpoint.id, - restore_target, self.parameters, self.restore_auth) - - item = self.karbor_client.restores.get(restore.id) - self.assertEqual(constants.RESTORE_STATUS_SUCCESS, - item.status) - self._store(item.resources_status) - - def test_restore_create_without_target_and_auth(self): - volume = self.store(objects.Volume()) - volume.create(1) - plan = self.store(objects.Plan()) - plan.create(self.provider_id_noop, [volume, ]) - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id_noop, plan.id) - - restore = self.store(objects.Restore()) - restore.create(self.provider_id_noop, checkpoint.id, - None, self.parameters, None) - - item = self.karbor_client.restores.get(restore.id) - self.assertEqual(constants.RESTORE_STATUS_SUCCESS, - item.status) - self._store(item.resources_status) - - def test_restore_get(self): - volume = self.store(objects.Volume()) - volume.create(1) - plan = self.store(objects.Plan()) - plan.create(self.provider_id_noop, [volume, ]) - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id_noop, plan.id) - - restore_target = self.get_restore_target(self.keystone_endpoint) - restore = self.store(objects.Restore()) - restore.create(self.provider_id_noop, checkpoint.id, - restore_target, self.parameters, self.restore_auth) - - item = self.karbor_client.restores.get(restore.id) - self.assertEqual(restore.id, item.id) - self.assertEqual(constants.RESTORE_STATUS_SUCCESS, - item.status) - self._store(item.resources_status) - - def test_restore_list(self): - volume = self.store(objects.Volume()) - 
volume.create(1) - plan = self.store(objects.Plan()) - plan.create(self.provider_id_noop, [volume, ]) - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id_noop, plan.id) - - restores = self.karbor_client.restores.list() - before_num = len(restores) - - restore_target = self.get_restore_target(self.keystone_endpoint) - restore1 = self.store(objects.Restore()) - restore1.create(self.provider_id_noop, checkpoint.id, - restore_target, self.parameters, self.restore_auth) - restore2 = self.store(objects.Restore()) - restore2.create(self.provider_id_noop, checkpoint.id, - restore_target, self.parameters, self.restore_auth) - - restores = self.karbor_client.restores.list() - after_num = len(restores) - self.assertLessEqual(2, after_num - before_num) - - item1 = self.karbor_client.restores.get(restore1.id) - self._store(item1.resources_status) - item2 = self.karbor_client.restores.get(restore2.id) - self._store(item2.resources_status) - - def test_restore_resources(self): - volume = self.store(objects.Volume()) - volume.create(1) - plan = self.store(objects.Plan()) - plan.create(self.provider_id_os, [volume, ]) - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id_os, plan.id) - - restore_target = self.get_restore_target(self.keystone_endpoint) - restore = self.store(objects.Restore()) - restore.create(self.provider_id_os, checkpoint.id, - restore_target, self.parameters, self.restore_auth) - - item = self.karbor_client.restores.get(restore.id) - self.assertEqual(constants.RESTORE_STATUS_SUCCESS, - item.status) - self.assertEqual(1, len(item.resources_status)) - self._store(item.resources_status) - - def test_restore_network_resources(self): - network = self.store(objects.Network()) - network.create() - plan = self.store(objects.Plan()) - plan.create(self.provider_id_os, [network, ]) - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id_os, plan.id) - network.close() - - restore_target = self.get_restore_target(self.keystone_endpoint) - restore = self.store(objects.Restore()) - restore.create(self.provider_id_os, checkpoint.id, - restore_target, self.parameters, self.restore_auth) - - item = self.karbor_client.restores.get(restore.id) - self.assertEqual(constants.RESTORE_STATUS_SUCCESS, - item.status) - self._store(item.resources_status) - - def test_restore_resources_with_fs_bank(self): - volume = self.store(objects.Volume()) - volume.create(1) - plan = self.store(objects.Plan()) - plan.create(self.provider_id_noop, [volume, ]) - checkpoint = self.store(objects.Checkpoint()) - checkpoint.create(self.provider_id_noop, plan.id) - - restore_target = self.get_restore_target(self.keystone_endpoint) - restore = self.store(objects.Restore()) - restore.create(self.provider_id_noop, checkpoint.id, - restore_target, self.parameters, self.restore_auth) - - item = self.karbor_client.restores.get(restore.id) - self.assertEqual(constants.RESTORE_STATUS_SUCCESS, - item.status) diff --git a/karbor/tests/fullstack/test_scheduled_operations.py b/karbor/tests/fullstack/test_scheduled_operations.py deleted file mode 100644 index 289cc9e1..00000000 --- a/karbor/tests/fullstack/test_scheduled_operations.py +++ /dev/null @@ -1,172 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import eventlet - -from datetime import datetime -from functools import partial - -from karbor.common import constants -from karbor.services.operationengine.engine.triggers.timetrigger \ - .timeformats import calendar_time -from karbor.tests.fullstack import karbor_base -from karbor.tests.fullstack import karbor_objects as objects -from karbor.tests.fullstack import utils - -pattern = "BEGIN:VEVENT\nRRULE:FREQ=WEEKLY;INTERVAL=1;\nEND:VEVENT" -DEFAULT_PROPERTY = { - 'pattern': pattern, - 'format': 'calendar' -} - - -class ScheduledOperationsTest(karbor_base.KarborBaseTest): - """Test Scheduled Operations operation - - """ - def setUp(self): - super(ScheduledOperationsTest, self).setUp() - providers = self.provider_list() - self.assertTrue(len(providers)) - self.provider_id = self.provider_id_noop - - def _create_scheduled_operation( - self, - resources, - trigger_properties=DEFAULT_PROPERTY, - operation_name=None): - plan = self.store(objects.Plan()) - plan.create(self.provider_id, resources) - operation_definition = {'plan_id': plan.id, - 'provider_id': self.provider_id} - trigger = self.store(objects.Trigger()) - trigger.create('time', trigger_properties) - - operation = objects.ScheduledOperation() - operation.create('protect', trigger.id, - operation_definition, operation_name) - return operation - - def _create_for_volume(self, - trigger_properties=DEFAULT_PROPERTY, - operation_name=None): - volume = self.store(objects.Volume()) - volume.create(1) - return self._create_scheduled_operation([volume, ], - trigger_properties, - operation_name) - - def _create_for_server(self, - trigger_properties=DEFAULT_PROPERTY, - operation_name=None): - server = self.store(objects.Server()) - server.create() - return self._create_scheduled_operation([server, ], - trigger_properties, - operation_name) - - def test_scheduled_operations_create_no_scheduled(self): - name = "KarborFullstack-Scheduled-Operation-no-scheduled" - operation = self.store(self._create_for_volume(operation_name=name)) - - item = self.karbor_client.scheduled_operations.get(operation.id) - self.assertEqual(name, item.name) - - items = self.karbor_client.scheduled_operations.list() - ids = [item_.id for item_ in items] - self.assertTrue(operation.id in ids) - - @staticmethod - def _wait_timestamp(pattern, start_time, freq): - if not isinstance(freq, int) or freq <= 0: - return 0 - - cur_time = copy.deepcopy(start_time) - cal_obj = calendar_time.ICal(start_time, pattern) - for i in range(freq): - next_time = cal_obj.compute_next_time(cur_time) - cur_time = next_time - return (next_time - start_time).seconds - - def _checkpoint_status(self, checkpoint_id, status): - try: - cp = self.karbor_client.checkpoints.get(self.provider_id, - checkpoint_id) - except Exception: - return False - - if status is None or cp.status == status: - return True - else: - return False - - def test_scheduled_operations_create_and_scheduled(self): - freq = 2 - eventlet_grace = 20 - pattern = "BEGIN:VEVENT\nRRULE:FREQ=MINUTELY;INTERVAL=2;\nEND:VEVENT" - cur_property = {'pattern': pattern, 'format': 'calendar'} - - operation = 
self.store(self._create_for_volume(cur_property)) - start_time = datetime.now().replace(microsecond=0) - sleep_time = self._wait_timestamp(pattern, start_time, freq) - sleep_time += eventlet_grace - self.assertNotEqual(0, sleep_time) - eventlet.sleep(sleep_time) - - items = self.karbor_client.checkpoints.list(self.provider_id) - operation_item = self.karbor_client.scheduled_operations.get( - operation.id) - plan_id = operation_item.operation_definition["plan_id"] - cps = list(filter(lambda x: x.protection_plan["id"] == plan_id, items)) - self.assertEqual(freq, len(cps)) - - for cp in cps: - utils.wait_until_true( - partial(self._checkpoint_status, - cp.id, - constants.CHECKPOINT_STATUS_AVAILABLE), - timeout=objects.LONG_TIMEOUT, sleep=objects.LONG_SLEEP - ) - checkpoint = self.store(objects.Checkpoint()) - checkpoint._provider_id = self.provider_id - checkpoint.id = cp.id - - def test_scheduled_operations_list(self): - operation1 = self.store(self._create_for_volume()) - operation2 = self.store(self._create_for_server()) - - items = self.karbor_client.scheduled_operations.list() - ids = [item.id for item in items] - self.assertTrue(operation1.id in ids) - self.assertTrue(operation2.id in ids) - - def test_scheduled_operations_get(self): - name = "KarborFullstack-Scheduled-Operation-Test-Get" - operation = self._create_for_volume(operation_name=name) - self.store(operation) - - item = self.karbor_client.scheduled_operations.get(operation.id) - self.assertEqual(item.name, name) - self.assertEqual(item.id, operation.id) - - def test_scheduled_operations_delete(self): - name = "KarborFullstack-Scheduled-Operation-Test-Delete" - operation = self._create_for_volume(operation_name=name) - - item = self.karbor_client.scheduled_operations.get(operation.id) - self.assertEqual(name, item.name) - - operation.close() - items = self.karbor_client.scheduled_operations.list() - ids = [item_.id for item_ in items] - self.assertTrue(operation.id not in ids) diff --git a/karbor/tests/fullstack/test_triggers.py b/karbor/tests/fullstack/test_triggers.py deleted file mode 100644 index 5d4393e1..00000000 --- a/karbor/tests/fullstack/test_triggers.py +++ /dev/null @@ -1,74 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
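# ---------------------------------------------------------------------
# NOTE(editor): illustrative sketch, not part of the deleted file. The
# trigger tests below feed karbor iCalendar snippets such as
# "BEGIN:VEVENT\nRRULE:FREQ=WEEKLY;INTERVAL=1;\nEND:VEVENT". karbor
# parses the full VEVENT with its own calendar_time module;
# python-dateutil is used here purely to show what the embedded
# recurrence rule means:
import itertools
from datetime import datetime

from dateutil import rrule

rule = rrule.rrulestr('FREQ=WEEKLY;INTERVAL=1',
                      dtstart=datetime(2020, 1, 1, 12, 0))
occurrences = list(itertools.islice(iter(rule), 3))
# -> Jan 1, Jan 8 and Jan 15 2020, each at 12:00 (weekly, interval 1).
# ---------------------------------------------------------------------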
- - -from datetime import datetime -from karbor.tests.fullstack import karbor_base -from karbor.tests.fullstack import karbor_objects as objects - - -class TriggersTest(karbor_base.KarborBaseTest): - """Test Triggers operation""" - - def test_triggers_list(self): - pattern1 = "BEGIN:VEVENT\nRRULE:FREQ=HOURLY;INTERVAL=1;\nEND:VEVENT" - trigger1 = self.store(objects.Trigger()) - trigger1.create('time', {'pattern': pattern1, 'format': 'calendar'}) - pattern2 = "BEGIN:VEVENT\nRRULE:FREQ=WEEKLY;INTERVAL=1;\nEND:VEVENT" - trigger2 = self.store(objects.Trigger()) - trigger2.create('time', {'pattern': pattern2, 'format': 'calendar'}) - - items = self.karbor_client.triggers.list() - ids = [item.id for item in items] - self.assertTrue(trigger1.id in ids) - self.assertTrue(trigger2.id in ids) - - def test_triggers_get(self): - trigger_name = "FullStack Trigger Test Get" - pattern = "BEGIN:VEVENT\nRRULE:FREQ=WEEKLY;INTERVAL=1;\nEND:VEVENT" - trigger = self.store(objects.Trigger()) - trigger.create('time', {'pattern': pattern, 'format': 'calendar'}, - name=trigger_name) - trigger = self.karbor_client.triggers.get(trigger.id) - self.assertEqual(trigger_name, trigger.name) - - def test_triggers_update(self): - trigger_name = "FullStack Trigger Test Update" - pattern1 = "BEGIN:VEVENT\nRRULE:FREQ=WEEKLY;INTERVAL=1;\nEND:VEVENT" - pattern2 = "BEGIN:VEVENT\nRRULE:FREQ=DAILY;INTERVAL=1;\nEND:VEVENT" - trigger = self.store(objects.Trigger()) - trigger.create('time', {'pattern': pattern1, 'format': 'calendar'}, - name=trigger_name) - properties = { - 'properties': { - 'pattern': pattern2, - 'format': 'calendar', - 'start_time': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), - } - } - - self.karbor_client.triggers.update( - trigger.id, - properties, - ) - - trigger = self.karbor_client.triggers.get(trigger.id) - self.assertEqual(trigger.properties['pattern'], pattern2) - - def test_triggers_delete(self): - pattern = "BEGIN:VEVENT\nRRULE:FREQ=WEEKLY;INTERVAL=1;\nEND:VEVENT" - trigger = objects.Trigger() - trigger.create('time', {'pattern': pattern, 'format': 'calendar'}) - self.karbor_client.triggers.delete(trigger.id) - items = self.karbor_client.triggers.list() - ids = [item.id for item in items] - self.assertTrue(trigger.id not in ids) diff --git a/karbor/tests/fullstack/utils.py b/karbor/tests/fullstack/utils.py deleted file mode 100644 index a442363c..00000000 --- a/karbor/tests/fullstack/utils.py +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet - - -def wait_until_true(predicate, timeout=60, sleep=1, exception=None): - """Wait until callable predicate is evaluated as True - - :param predicate: Callable deciding whether waiting should continue. - Best practice is to instantiate predicate with functools.partial() - :param timeout: Timeout in seconds how long should function wait. - :param sleep: Polling interval for results in seconds. - :param exception: Exception class for eventlet.Timeout. 
- (see doc for eventlet.Timeout for more information) - - """ - with eventlet.timeout.Timeout(timeout, exception): - while not predicate(): - eventlet.sleep(sleep) - - -def wait_until_is_and_return(predicate, timeout=5, sleep=1, exception=None): - container = {} - - def internal_predicate(): - container['value'] = predicate() - return container['value'] - - wait_until_true(internal_predicate, timeout, sleep, exception) - return container.get('value') - - -def wait_until_none(predicate, timeout=5, sleep=1, exception=None): - def internal_predicate(): - ret = predicate() - if ret: - return False - return True - wait_until_true(internal_predicate, timeout, sleep, exception) diff --git a/karbor/tests/test_karbor.py b/karbor/tests/test_karbor.py deleted file mode 100644 index e8270cdd..00000000 --- a/karbor/tests/test_karbor.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_karbor ----------------------------------- - -Tests for `karbor` module. -""" - -from karbor.tests import base - - -class TestKarbor(base.TestCase): - - def test_something(self): - pass diff --git a/karbor/tests/unit/__init__.py b/karbor/tests/unit/__init__.py deleted file mode 100644 index 34945c47..00000000 --- a/karbor/tests/unit/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet - -from karbor import objects - -eventlet.monkey_patch() -objects.register_all() diff --git a/karbor/tests/unit/api/__init__.py b/karbor/tests/unit/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/api/fakes.py b/karbor/tests/unit/api/fakes.py deleted file mode 100644 index 7390e7fa..00000000 --- a/karbor/tests/unit/api/fakes.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
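# ---------------------------------------------------------------------
# NOTE(editor): illustrative sketch, not part of the deleted file. It
# shows how the wait_until_* helpers from fullstack/utils.py above are
# typically driven: functools.partial binds the polled resource into the
# predicate, and eventlet's cooperative sleep lets the "resource" make
# progress in the background. The dict stands in for an OpenStack API.
import functools

import eventlet

resource = {'status': 'creating'}


def _finish_later():
    eventlet.sleep(0.2)
    resource['status'] = 'available'


def _status_is(res, status):
    return res['status'] == status


eventlet.spawn(_finish_later)
# wait_until_true as defined in fullstack/utils.py above.
wait_until_true(functools.partial(_status_is, resource, 'available'),
                timeout=5, sleep=0.1)
assert resource['status'] == 'available'
# ---------------------------------------------------------------------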
- -from oslo_service import wsgi -from oslo_utils import uuidutils - -import routes -import webob -import webob.dec -import webob.request - -from karbor.api.openstack import wsgi as os_wsgi -from karbor import context -from karbor.services.protection.protection_plugins.volume \ - import volume_plugin_cinder_schemas as cinder_schemas - -FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' -FAKE_UUIDS = {} -PROVIDER_OS = { - "description": "This provider uses OpenStack's own services " - "(swift, cinder) as storage", - "extended_info_schema": { - "options_schema": { - "OS::Cinder::Volume": cinder_schemas.OPTIONS_SCHEMA - }, - "saved_info_schema": { - "OS::Cinder::Volume": cinder_schemas.SAVED_INFO_SCHEMA - }, - "restore_schema": { - "OS::Cinder::Volume": cinder_schemas.RESTORE_SCHEMA - } - }, - "id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "name": "OS Infra Provider" -} - - -class Context(object): - pass - - -class FakeRouter(wsgi.Router): - def __init__(self, ext_mgr=None): - pass - - @webob.dec.wsgify - def __call__(self, req): - res = webob.Response() - res.status = '200' - res.headers['X-Test-Success'] = 'True' - return res - - -@webob.dec.wsgify -def fake_wsgi(self, req): - return self.application - - -class FakeToken(object): - id_count = 0 - - def __getitem__(self, key): - return getattr(self, key) - - def __init__(self, **kwargs): - FakeToken.id_count += 1 - self.id = FakeToken.id_count - for k, v in kwargs.items(): - setattr(self, k, v) - - -class FakeRequestContext(context.RequestContext): - def __init__(self, *args, **kwargs): - kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token') - super(FakeRequestContext, self).__init__(*args, **kwargs) - - -class HTTPRequest(webob.Request): - - @classmethod - def blank(cls, *args, **kwargs): - if args is not None: - if args[0].find('v1') == 0: - kwargs['base_url'] = 'http://localhost/v1' - else: - kwargs['base_url'] = 'http://localhost/v2' - - use_admin_context = kwargs.pop('use_admin_context', False) - out = os_wsgi.Request.blank(*args, **kwargs) - out.environ['karbor.context'] = FakeRequestContext( - 'fake_user', - 'fakeproject', - is_admin=use_admin_context) - return out - - -class TestRouter(wsgi.Router): - def __init__(self, controller): - mapper = routes.Mapper() - mapper.resource("test", "tests", - controller=os_wsgi.Resource(controller)) - super(TestRouter, self).__init__(mapper) - - -def get_fake_uuid(token=0): - if token not in FAKE_UUIDS: - FAKE_UUIDS[token] = uuidutils.generate_uuid() - return FAKE_UUIDS[token] diff --git a/karbor/tests/unit/api/middleware/__init__.py b/karbor/tests/unit/api/middleware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/api/middleware/test_auth.py b/karbor/tests/unit/api/middleware/test_auth.py deleted file mode 100644 index 853f9c09..00000000 --- a/karbor/tests/unit/api/middleware/test_auth.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
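# ---------------------------------------------------------------------
# NOTE(editor): illustrative sketch, not part of the deleted file. It
# shows the pattern behind HTTPRequest.blank in fakes.py above:
# pre-seeding the WSGI environ so code under test finds a ready-made
# context instead of real auth middleware output. Plain webob, with a
# stand-in context class:
import webob


class StubContext(object):
    def __init__(self, user_id, project_id, is_admin=False):
        self.user_id = user_id
        self.project_id = project_id
        self.is_admin = is_admin


req = webob.Request.blank('/v1/plans')
req.environ['karbor.context'] = StubContext('fake_user', 'fakeproject')

# A handler (or middleware) under test can now read it straight back:
ctxt = req.environ['karbor.context']
assert ctxt.project_id == 'fakeproject' and not ctxt.is_admin
# ---------------------------------------------------------------------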
- -from oslo_middleware import request_id -import webob - -import karbor.api.middleware.auth -from karbor.tests import base - - -class TestKarborKeystoneContextMiddleware(base.TestCase): - - def setUp(self): - super(TestKarborKeystoneContextMiddleware, self).setUp() - - @webob.dec.wsgify() - def fake_app(req): - self.context = req.environ['karbor.context'] - return webob.Response() - - self.context = None - self.middleware = (karbor.api.middleware.auth - .KarborKeystoneContext(fake_app)) - self.request = webob.Request.blank('/') - self.request.headers['X_TENANT_ID'] = 'testtenantid' - self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken' - - def test_no_user_or_user_id(self): - response = self.request.get_response(self.middleware) - self.assertEqual('401 Unauthorized', response.status) - - def test_user_only(self): - self.request.headers['X_USER'] = 'testuser' - response = self.request.get_response(self.middleware) - self.assertEqual('200 OK', response.status) - self.assertEqual('testuser', self.context.user_id) - - def test_user_id_only(self): - self.request.headers['X_USER_ID'] = 'testuserid' - response = self.request.get_response(self.middleware) - self.assertEqual('200 OK', response.status) - self.assertEqual('testuserid', self.context.user_id) - - def test_user_id_trumps_user(self): - self.request.headers['X_USER_ID'] = 'testuserid' - self.request.headers['X_USER'] = 'testuser' - response = self.request.get_response(self.middleware) - self.assertEqual('200 OK', response.status) - self.assertEqual('testuserid', self.context.user_id) - - def test_tenant_id_name(self): - self.request.headers['X_USER_ID'] = 'testuserid' - self.request.headers['X_TENANT_NAME'] = 'testtenantname' - response = self.request.get_response(self.middleware) - self.assertEqual('200 OK', response.status) - self.assertEqual('testtenantid', self.context.project_id) - self.assertEqual('testtenantname', self.context.project_name) - - def test_request_id_extracted_from_env(self): - req_id = 'dummy-request-id' - self.request.headers['X_PROJECT_ID'] = 'testtenantid' - self.request.headers['X_USER_ID'] = 'testuserid' - self.request.environ[request_id.ENV_REQUEST_ID] = req_id - self.request.get_response(self.middleware) - self.assertEqual(req_id, self.context.request_id) diff --git a/karbor/tests/unit/api/openstack/__init__.py b/karbor/tests/unit/api/openstack/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/api/openstack/test_wsgi.py b/karbor/tests/unit/api/openstack/test_wsgi.py deleted file mode 100644 index 8dcdece5..00000000 --- a/karbor/tests/unit/api/openstack/test_wsgi.py +++ /dev/null @@ -1,125 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
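
For reference, the behaviour pinned down by the auth-middleware tests above reduces to a small header-to-context translation. An illustrative sketch only, not the removed implementation (which also copied roles, the auth token, and the oslo request id into the context):

    import webob.dec
    import webob.exc

    class KeystoneContextSketch(object):
        def __init__(self, application):
            self.application = application

        @webob.dec.wsgify
        def __call__(self, req):
            # X_USER_ID (set by keystonemiddleware) wins over the
            # deprecated X_USER header; neither present means 401.
            user_id = (req.headers.get('X_USER_ID')
                       or req.headers.get('X_USER'))
            if user_id is None:
                return webob.exc.HTTPUnauthorized()
            req.environ['sketch.context'] = {
                'user_id': user_id,
                'project_id': req.headers.get('X_TENANT_ID'),
                'project_name': req.headers.get('X_TENANT_NAME'),
            }
            return self.application
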
- -from karbor.api.openstack import wsgi -from karbor.tests import base - - -class RequestTest(base.TestCase): - def test_content_type_missing(self): - request = wsgi.Request.blank('/tests/123', method='POST') - request.body = b"" - self.assertIsNone(request.get_content_type()) - - def test_content_type_unsupported(self): - request = wsgi.Request.blank('/tests/123', method='POST') - request.headers["Content-Type"] = "text/html" - request.body = b"asdf
" - self.assertIsNone(request.get_content_type()) - - def test_content_type_with_charset(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "application/json; charset=UTF-8" - result = request.get_content_type() - self.assertEqual("application/json", result) - - def test_content_type_from_accept(self): - for content_type in ('application/json',): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = content_type - result = request.best_match_content_type() - self.assertEqual(content_type, result) - - def test_content_type_from_accept_best(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml, application/json" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_from_query_extension(self): - request = wsgi.Request.blank('/tests/123.json') - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - request = wsgi.Request.blank('/tests/123.invalid') - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_accept_default(self): - request = wsgi.Request.blank('/tests/123.unsupported') - request.headers["Accept"] = "application/unsupported1" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - -class ActionDispatcherTest(base.TestCase): - def test_dispatch(self): - serializer = wsgi.ActionDispatcher() - serializer.create = lambda x: 'pants' - self.assertEqual('pants', serializer.dispatch({}, action='create')) - - def test_dispatch_action_none(self): - serializer = wsgi.ActionDispatcher() - serializer.create = lambda x: 'pants' - serializer.default = lambda x: 'trousers' - self.assertEqual('trousers', serializer.dispatch({}, action=None)) - - def test_dispatch_default(self): - serializer = wsgi.ActionDispatcher() - serializer.create = lambda x: 'pants' - serializer.default = lambda x: 'trousers' - self.assertEqual('trousers', serializer.dispatch({}, action='update')) - - -class DictSerializerTest(base.TestCase): - def test_dispatch_default(self): - serializer = wsgi.DictSerializer() - self.assertEqual('', serializer.serialize({}, 'update')) - - -class JSONDictSerializerTest(base.TestCase): - def test_json(self): - input_dict = dict(servers=dict(a=(2, 3))) - expected_json = '{"servers":{"a":[2,3]}}' - serializer = wsgi.JSONDictSerializer() - result = serializer.serialize(input_dict) - result = result.replace('\n', '').replace(' ', '') - self.assertEqual(expected_json, result) - - -class TextDeserializerTest(base.TestCase): - def test_dispatch_default(self): - deserializer = wsgi.TextDeserializer() - self.assertEqual({}, deserializer.deserialize({}, 'update')) - - -class JSONDeserializerTest(base.TestCase): - def test_json(self): - data = """{"a": { - "a1": "1", - "a2": "2", - "bs": ["1", "2", "3", {"c": {"c1": "1"}}], - "d": {"e": "1"}, - "f": "1"}}""" - as_dict = { - 'body': { - 'a': { - 'a1': '1', - 'a2': '2', - 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], - 'd': {'e': '1'}, - 'f': '1', - }, - }, - } - deserializer = wsgi.JSONDeserializer() - self.assertEqual(as_dict, deserializer.deserialize(data)) diff --git a/karbor/tests/unit/api/test_api_validation.py b/karbor/tests/unit/api/test_api_validation.py deleted file mode 100644 index 4ccc60cb..00000000 --- a/karbor/tests/unit/api/test_api_validation.py +++ /dev/null @@ -1,499 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the 
"License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -import fixtures -import six -from six.moves import http_client as http - -from karbor.api import validation -from karbor.api.validation import parameter_types -from karbor import exception -from karbor.tests import base - - -class FakeRequest(object): - environ = {} - - -class ValidationRegex(base.TestCase): - - def test_build_regex_range(self): - - def _get_all_chars(): - for i in range(0x7F): - yield six.unichr(i) - - self.useFixture(fixtures.MonkeyPatch( - 'karbor.api.validation.parameter_types._get_all_chars', - _get_all_chars)) - - r = parameter_types._build_regex_range(ws=False) - self.assertEqual(re.escape('!') + '-' + re.escape('~'), r) - - # if we allow whitespace the range starts earlier - r = parameter_types._build_regex_range(ws=True) - self.assertEqual(re.escape(' ') + '-' + re.escape('~'), r) - - # excluding a character will give us 2 ranges - r = parameter_types._build_regex_range(ws=True, exclude=['A']) - self.assertEqual(re.escape(' ') + '-' + re.escape('@') + - 'B' + '-' + re.escape('~'), r) - - # inverting which gives us all the initial unprintable characters. - r = parameter_types._build_regex_range(ws=False, invert=True) - self.assertEqual(re.escape('\x00') + '-' + re.escape(' '), r) - - # excluding characters that create a singleton. Naively this would be: - # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural. - r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C']) - self.assertEqual(re.escape(' ') + '-' + re.escape('@') + - 'B' + 'D' + '-' + re.escape('~'), r) - - # ws=True means the positive regex has printable whitespaces, - # so the inverse will not. The inverse will include things we - # exclude. - r = parameter_types._build_regex_range( - ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True) - self.assertEqual(re.escape('\x00') + '-' + re.escape('\x1f') + - 'A-CZ', r) - - -class APIValidationTestCase(base.TestCase): - - def setUp(self, schema=None): - super(APIValidationTestCase, self).setUp() - self.post = None - - if schema is not None: - @validation.schema(request_body_schema=schema) - def post(req, body): - return 'Validation succeeded.' 
- - self.post = post - - def check_validation_error(self, method, body, expected_detail, req=None): - if not req: - req = FakeRequest() - try: - method(body=body, req=req,) - except exception.ValidationError as ex: - self.assertEqual(http.BAD_REQUEST, ex.kwargs['code']) - if isinstance(expected_detail, list): - self.assertIn(ex.kwargs['detail'], expected_detail, - 'Exception details did not match expected') - elif not re.match(expected_detail, ex.kwargs['detail']): - self.assertEqual(expected_detail, ex.kwargs['detail'], - 'Exception details did not match expected') - except Exception as ex: - self.fail('An unexpected exception happens: %s' % ex) - else: - self.fail('Any exception did not happen.') - - -class RequiredDisableTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': { - 'type': 'integer', - }, - }, - } - super(RequiredDisableTestCase, self).setUp(schema=schema) - - def test_validate_required_disable(self): - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 1}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'abc': 1}, req=FakeRequest())) - - -class RequiredEnableTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': { - 'type': 'integer', - }, - }, - 'required': ['foo'] - } - super(RequiredEnableTestCase, self).setUp(schema=schema) - - def test_validate_required_enable(self): - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 1}, req=FakeRequest())) - - def test_validate_required_enable_fails(self): - detail = "'foo' is a required property" - self.check_validation_error(self.post, body={'abc': 1}, - expected_detail=detail) - - -class AdditionalPropertiesEnableTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': { - 'type': 'integer', - }, - }, - 'required': ['foo'], - } - super(AdditionalPropertiesEnableTestCase, self).setUp(schema=schema) - - def test_validate_additionalProperties_enable(self): - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 1}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 1, 'ext': 1}, - req=FakeRequest())) - - -class AdditionalPropertiesDisableTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': { - 'type': 'integer', - }, - }, - 'required': ['foo'], - 'additionalProperties': False, - } - super(AdditionalPropertiesDisableTestCase, self).setUp(schema=schema) - - def test_validate_additionalProperties_disable(self): - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 1}, req=FakeRequest())) - - def test_validate_additionalProperties_disable_fails(self): - detail = "Additional properties are not allowed ('ext' was unexpected)" - self.check_validation_error(self.post, body={'foo': 1, 'ext': 1}, - expected_detail=detail) - - -class PatternPropertiesTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'patternProperties': { - '^[a-zA-Z0-9]{1,10}$': { - 'type': 'string' - }, - }, - 'additionalProperties': False, - } - super(PatternPropertiesTestCase, self).setUp(schema=schema) - - def test_validate_patternProperties(self): - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 'bar'}, req=FakeRequest())) - - def test_validate_patternProperties_fails(self): - details = [ - "Additional properties are not allowed ('__' was unexpected)", - "'__' does not 
match any of the regexes: '^[a-zA-Z0-9]{1,10}$'" - ] - self.check_validation_error(self.post, body={'__': 'bar'}, - expected_detail=details) - - details = [ - "'' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'", - "Additional properties are not allowed ('' was unexpected)" - ] - self.check_validation_error(self.post, body={'': 'bar'}, - expected_detail=details) - - details = [ - ("'0123456789a' does not match any of the regexes: " - "'^[a-zA-Z0-9]{1,10}$'"), - ("Additional properties are not allowed ('0123456789a' was" - " unexpected)") - ] - self.check_validation_error(self.post, body={'0123456789a': 'bar'}, - expected_detail=details) - - if six.PY3: - detail = "expected string or bytes-like object" - else: - detail = "expected string or buffer" - self.check_validation_error(self.post, body={None: 'bar'}, - expected_detail=detail) - - -class StringTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': { - 'type': 'string', - }, - }, - } - super(StringTestCase, self).setUp(schema=schema) - - def test_validate_string(self): - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 'abc'}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': '0'}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': ''}, req=FakeRequest())) - - def test_validate_string_fails(self): - detail = ("Invalid input for field/attribute foo. Value: 1." - " 1 is not of type 'string'") - self.check_validation_error(self.post, body={'foo': 1}, - expected_detail=detail) - - detail = ("Invalid input for field/attribute foo. Value: 1.5." - " 1.5 is not of type 'string'") - self.check_validation_error(self.post, body={'foo': 1.5}, - expected_detail=detail) - - detail = ("Invalid input for field/attribute foo. Value: True." - " True is not of type 'string'") - self.check_validation_error(self.post, body={'foo': True}, - expected_detail=detail) - - -class StringLengthTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': { - 'type': 'string', - 'minLength': 1, - 'maxLength': 10, - }, - }, - } - super(StringLengthTestCase, self).setUp(schema=schema) - - def test_validate_string_length(self): - self.assertEqual('Validation succeeded.', - self.post(body={'foo': '0'}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': '0123456789'}, - req=FakeRequest())) - - def test_validate_string_length_fails(self): - detail = ("Invalid input for field/attribute foo. Value: ." - " '' is too short") - self.check_validation_error(self.post, body={'foo': ''}, - expected_detail=detail) - - detail = ("Invalid input for field/attribute foo. Value: 0123456789a." 
- " '0123456789a' is too long") - self.check_validation_error(self.post, body={'foo': '0123456789a'}, - expected_detail=detail) - - -class IntegerTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': { - 'type': ['integer', 'string'], - 'pattern': '^[0-9]+$', - }, - }, - } - super(IntegerTestCase, self).setUp(schema=schema) - - def test_validate_integer(self): - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 1}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': '1'}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': '0123456789'}, - req=FakeRequest())) - - def test_validate_integer_fails(self): - detail = ("Invalid input for field/attribute foo. Value: abc." - " 'abc' does not match '^[0-9]+$'") - self.check_validation_error(self.post, body={'foo': 'abc'}, - expected_detail=detail) - - detail = ("Invalid input for field/attribute foo. Value: True." - " True is not of type 'integer', 'string'") - self.check_validation_error(self.post, body={'foo': True}, - expected_detail=detail) - - detail = ("Invalid input for field/attribute foo. Value: 0xffff." - " '0xffff' does not match '^[0-9]+$'") - self.check_validation_error(self.post, body={'foo': '0xffff'}, - expected_detail=detail) - - detail = ("Invalid input for field/attribute foo. Value: 1.0." - " 1.0 is not of type 'integer', 'string'") - self.check_validation_error(self.post, body={'foo': 1.0}, - expected_detail=detail) - - detail = ("Invalid input for field/attribute foo. Value: 1.0." - " '1.0' does not match '^[0-9]+$'") - self.check_validation_error(self.post, body={'foo': '1.0'}, - expected_detail=detail) - - -class IntegerRangeTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': { - 'type': ['integer', 'string'], - 'pattern': '^[0-9]+$', - 'minimum': 1, - 'maximum': 10, - }, - }, - } - super(IntegerRangeTestCase, self).setUp(schema=schema) - - def test_validate_integer_range(self): - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 1}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 10}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': '1'}, req=FakeRequest())) - - def test_validate_integer_range_fails(self): - detail = ("Invalid input for field/attribute foo. Value: 0." - " 0(.0)? is less than the minimum of 1") - self.check_validation_error(self.post, body={'foo': 0}, - expected_detail=detail) - - detail = ("Invalid input for field/attribute foo. Value: 11." - " 11(.0)? is greater than the maximum of 10") - self.check_validation_error(self.post, body={'foo': 11}, - expected_detail=detail) - - detail = ("Invalid input for field/attribute foo. Value: 0." - " 0(.0)? is less than the minimum of 1") - self.check_validation_error(self.post, body={'foo': '0'}, - expected_detail=detail) - - detail = ("Invalid input for field/attribute foo. Value: 11." - " 11(.0)? 
is greater than the maximum of 10") - self.check_validation_error(self.post, body={'foo': '11'}, - expected_detail=detail) - - -class BooleanTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': parameter_types.boolean, - }, - } - super(BooleanTestCase, self).setUp(schema=schema) - - def test_validate_boolean(self): - self.assertEqual('Validation succeeded.', - self.post(body={'foo': True}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': False}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 'True'}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 'False'}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': '1'}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': '0'}, req=FakeRequest())) - - def test_validate_boolean_fails(self): - enum_boolean = ("[True, 'True', 'TRUE', 'true', '1', 'ON', 'On'," - " 'on', 'YES', 'Yes', 'yes', 'y', 't'," - " False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off'," - " 'off', 'NO', 'No', 'no', 'n', 'f']") - - detail = ("Invalid input for field/attribute foo. Value: bar." - " 'bar' is not one of %s") % enum_boolean - self.check_validation_error(self.post, body={'foo': 'bar'}, - expected_detail=detail) - - detail = ("Invalid input for field/attribute foo. Value: 2." - " '2' is not one of %s") % enum_boolean - self.check_validation_error(self.post, body={'foo': '2'}, - expected_detail=detail) - - -class NameTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': parameter_types.name, - }, - } - super(NameTestCase, self).setUp(schema=schema) - - def test_validate_name(self): - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 'volume.1'}, - req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 'volume 1'}, - req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': 'a'}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': u'\u0434'}, req=FakeRequest())) - self.assertEqual('Validation succeeded.', - self.post(body={'foo': u'\u0434\u2006\ufffd'}, - req=FakeRequest())) - - -class DatetimeTestCase(APIValidationTestCase): - - def setUp(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': { - 'type': 'string', - 'format': 'date-time', - }, - }, - } - super(DatetimeTestCase, self).setUp(schema=schema) - - def test_validate_datetime(self): - self.assertEqual('Validation succeeded.', - self.post(body={ - 'foo': '2017-01-14T01:00:00Z'}, req=FakeRequest() - )) diff --git a/karbor/tests/unit/api/test_common.py b/karbor/tests/unit/api/test_common.py deleted file mode 100644 index 664d601a..00000000 --- a/karbor/tests/unit/api/test_common.py +++ /dev/null @@ -1,438 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
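
The long run of validation tests above all drove a single decorator, karbor.api.validation.schema, which checks the request body against a JSON Schema before the handler runs. A stripped-down sketch of that pattern, using plain jsonschema with ValueError standing in for karbor's exception.ValidationError:

    import functools

    import jsonschema

    def schema(request_body_schema):
        validator = jsonschema.Draft4Validator(request_body_schema)

        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                # Surface the first schema violation, much as the
                # removed decorator did via ValidationError.
                errors = list(validator.iter_errors(kwargs.get('body')))
                if errors:
                    raise ValueError(errors[0].message)
                return func(*args, **kwargs)
            return wrapper
        return decorator

    @schema({'type': 'object',
             'properties': {'foo': {'type': 'integer'}},
             'required': ['foo']})
    def post(req=None, body=None):
        return 'Validation succeeded.'
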
- -""" -Test suites for 'common' code used throughout the OpenStack HTTP API. -""" - -from unittest import mock - -from oslo_config import cfg -from testtools import matchers -import webob -import webob.exc - -from karbor.api import common -from karbor.tests import base - - -NS = "{http://docs.openstack.org/compute/api/v1.1}" -ATOMNS = "{http://www.w3.org/2005/Atom}" -CONF = cfg.CONF - - -class PaginationParamsTest(base.TestCase): - """Unit tests for `karbor.api.common.get_pagination_params` method. - - This method takes in a request object and returns 'marker' and 'limit' - GET params. - """ - - def test_nonnumerical_limit(self): - """Test nonnumerical limit param.""" - req = webob.Request.blank('/?limit=hello') - self.assertRaises( - webob.exc.HTTPBadRequest, common.get_pagination_params, - req.GET.copy()) - - @mock.patch.object(common, 'CONF') - def test_no_params(self, mock_cfg): - """Test no params.""" - mock_cfg.osapi_max_limit = 100 - req = webob.Request.blank('/') - expected = (None, 100, 0) - self.assertEqual(expected, - common.get_pagination_params(req.GET.copy())) - - def test_valid_marker(self): - """Test valid marker param.""" - marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' - req = webob.Request.blank('/?marker=' + marker) - expected = (marker, CONF.osapi_max_limit, 0) - self.assertEqual(expected, - common.get_pagination_params(req.GET.copy())) - - def test_valid_limit(self): - """Test valid limit param.""" - req = webob.Request.blank('/?limit=10') - expected = (None, 10, 0) - self.assertEqual(expected, - common.get_pagination_params(req.GET.copy())) - - def test_invalid_limit(self): - """Test invalid limit param.""" - req = webob.Request.blank('/?limit=-2') - self.assertRaises( - webob.exc.HTTPBadRequest, common.get_pagination_params, - req.GET.copy()) - - def test_valid_limit_and_marker(self): - """Test valid limit and marker parameters.""" - marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' - req = webob.Request.blank('/?limit=20&marker=%s' % marker) - expected = (marker, 20, 0) - self.assertEqual(expected, - common.get_pagination_params(req.GET.copy())) - - -class SortParamUtilsTest(base.TestCase): - - def test_get_sort_params_defaults(self): - """Verifies the default sort key and direction.""" - sort_keys, sort_dirs = common.get_sort_params({}) - self.assertEqual(['created_at'], sort_keys) - self.assertEqual(['desc'], sort_dirs) - - def test_get_sort_params_override_defaults(self): - """Verifies that the defaults can be overridden.""" - sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1', - default_dir='dir1') - self.assertEqual(['key1'], sort_keys) - self.assertEqual(['dir1'], sort_dirs) - - def test_get_sort_params_single_value_sort_param(self): - """Verifies a single sort key and direction.""" - params = {'sort': 'key1:dir1'} - sort_keys, sort_dirs = common.get_sort_params(params) - self.assertEqual(['key1'], sort_keys) - self.assertEqual(['dir1'], sort_dirs) - - def test_get_sort_params_single_value_old_params(self): - """Verifies a single sort key and direction.""" - params = {'sort_key': 'key1', 'sort_dir': 'dir1'} - sort_keys, sort_dirs = common.get_sort_params(params) - self.assertEqual(['key1'], sort_keys) - self.assertEqual(['dir1'], sort_dirs) - - def test_get_sort_params_single_with_default_sort_param(self): - """Verifies a single sort value with a default direction.""" - params = {'sort': 'key1'} - sort_keys, sort_dirs = common.get_sort_params(params) - self.assertEqual(['key1'], sort_keys) - # Direction should be defaulted - 
self.assertEqual(['desc'], sort_dirs) - - def test_get_sort_params_single_with_default_old_params(self): - """Verifies a single sort value with a default direction.""" - params = {'sort_key': 'key1'} - sort_keys, sort_dirs = common.get_sort_params(params) - self.assertEqual(['key1'], sort_keys) - # Direction should be defaulted - self.assertEqual(['desc'], sort_dirs) - - def test_get_sort_params_multiple_values(self): - """Verifies multiple sort parameter values.""" - params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'} - sort_keys, sort_dirs = common.get_sort_params(params) - self.assertEqual(['key1', 'key2', 'key3'], sort_keys) - self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs) - - def test_get_sort_params_multiple_not_all_dirs(self): - """Verifies multiple sort keys without all directions.""" - params = {'sort': 'key1:dir1,key2,key3:dir3'} - sort_keys, sort_dirs = common.get_sort_params(params) - self.assertEqual(['key1', 'key2', 'key3'], sort_keys) - # Second key is missing the direction, should be defaulted - self.assertEqual(['dir1', 'desc', 'dir3'], sort_dirs) - - def test_get_sort_params_multiple_override_default_dir(self): - """Verifies multiple sort keys and overriding default direction.""" - params = {'sort': 'key1:dir1,key2,key3'} - sort_keys, sort_dirs = common.get_sort_params(params, - default_dir='foo') - self.assertEqual(['key1', 'key2', 'key3'], sort_keys) - self.assertEqual(['dir1', 'foo', 'foo'], sort_dirs) - - def test_get_sort_params_params_modified(self): - """Verifies that the input sort parameter are modified.""" - params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'} - common.get_sort_params(params) - self.assertEqual({}, params) - - params = {'sort_key': 'key1', 'sort_dir': 'dir1'} - common.get_sort_params(params) - self.assertEqual({}, params) - - def test_get_sort_params_random_spaces(self): - """Verifies that leading and trailing spaces are removed.""" - params = {'sort': ' key1 : dir1,key2: dir2 , key3 '} - sort_keys, sort_dirs = common.get_sort_params(params) - self.assertEqual(['key1', 'key2', 'key3'], sort_keys) - self.assertEqual(['dir1', 'dir2', 'desc'], sort_dirs) - - def test_get_params_mix_sort_and_old_params(self): - """An exception is raised if both types of sorting params are given.""" - for params in ({'sort': 'k1', 'sort_key': 'k1'}, - {'sort': 'k1', 'sort_dir': 'd1'}, - {'sort': 'k1', 'sort_key': 'k1', 'sort_dir': 'd2'}): - self.assertRaises(webob.exc.HTTPBadRequest, - common.get_sort_params, - params) - - -class MiscFunctionsTest(base.TestCase): - - def test_remove_major_version_from_href(self): - fixture = 'http://www.testsite.com/v1/images' - expected = 'http://www.testsite.com/images' - actual = common.remove_version_from_href(fixture) - self.assertEqual(expected, actual) - - def test_remove_version_from_href(self): - fixture = 'http://www.testsite.com/v1.1/images' - expected = 'http://www.testsite.com/images' - actual = common.remove_version_from_href(fixture) - self.assertEqual(expected, actual) - - def test_remove_version_from_href_2(self): - fixture = 'http://www.testsite.com/v1.1/' - expected = 'http://www.testsite.com/' - actual = common.remove_version_from_href(fixture) - self.assertEqual(expected, actual) - - def test_remove_version_from_href_3(self): - fixture = 'http://www.testsite.com/v10.10' - expected = 'http://www.testsite.com' - actual = common.remove_version_from_href(fixture) - self.assertEqual(expected, actual) - - def test_remove_version_from_href_4(self): - fixture = 'http://www.testsite.com/v1.1/images/v10.5' - expected = 
'http://www.testsite.com/images/v10.5' - actual = common.remove_version_from_href(fixture) - self.assertEqual(expected, actual) - - def test_remove_version_from_href_bad_request(self): - fixture = 'http://www.testsite.com/1.1/images' - self.assertRaises(ValueError, - common.remove_version_from_href, - fixture) - - def test_remove_version_from_href_bad_request_2(self): - fixture = 'http://www.testsite.com/v/images' - self.assertRaises(ValueError, - common.remove_version_from_href, - fixture) - - def test_remove_version_from_href_bad_request_3(self): - fixture = 'http://www.testsite.com/v1.1images' - self.assertRaises(ValueError, - common.remove_version_from_href, - fixture) - - -class TestCollectionLinks(base.TestCase): - """Tests the _get_collection_links method.""" - - def _validate_next_link(self, item_count, osapi_max_limit, limit, - should_link_exist): - req = webob.Request.blank('/?limit=%s' % limit if limit else '/') - link_return = [{"rel": "next", "href": "fake_link"}] - self.flags(osapi_max_limit=osapi_max_limit) - if limit is None: - limited_list_size = min(item_count, osapi_max_limit) - else: - limited_list_size = min(item_count, osapi_max_limit, limit) - limited_list = [{"uuid": str(i)} for i in range(limited_list_size)] - builder = common.ViewBuilder() - - def get_pagination_params(params, max_limit=CONF.osapi_max_limit, - original_call=common.get_pagination_params): - return original_call(params, max_limit) - - def _get_limit_param(params, max_limit=CONF.osapi_max_limit, - original_call=common._get_limit_param): - return original_call(params, max_limit) - - with mock.patch.object(common, 'get_pagination_params', - get_pagination_params), \ - mock.patch.object(common, '_get_limit_param', - _get_limit_param), \ - mock.patch.object(common.ViewBuilder, '_generate_next_link', - return_value=link_return) as href_link_mock: - results = builder._get_collection_links(req, limited_list, - mock.sentinel.coll_key, - item_count, "uuid") - if should_link_exist: - href_link_mock.assert_called_once_with(limited_list, "uuid", - req, - mock.sentinel.coll_key) - self.assertThat(results, matchers.HasLength(1)) - else: - self.assertFalse(href_link_mock.called) - self.assertThat(results, matchers.HasLength(0)) - - def test_items_equals_osapi_max_no_limit(self): - item_count = 5 - osapi_max_limit = 5 - limit = None - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_items_equals_osapi_max_greater_than_limit(self): - item_count = 5 - osapi_max_limit = 5 - limit = 4 - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_items_equals_osapi_max_equals_limit(self): - item_count = 5 - osapi_max_limit = 5 - limit = 5 - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_items_equals_osapi_max_less_than_limit(self): - item_count = 5 - osapi_max_limit = 5 - limit = 6 - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_items_less_than_osapi_max_no_limit(self): - item_count = 5 - osapi_max_limit = 7 - limit = None - should_link_exist = False - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_limit_less_than_items_less_than_osapi_max(self): - item_count = 5 - osapi_max_limit = 7 - limit = 4 - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - 
should_link_exist) - - def test_limit_equals_items_less_than_osapi_max(self): - item_count = 5 - osapi_max_limit = 7 - limit = 5 - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_items_less_than_limit_less_than_osapi_max(self): - item_count = 5 - osapi_max_limit = 7 - limit = 6 - should_link_exist = False - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_items_less_than_osapi_max_equals_limit(self): - item_count = 5 - osapi_max_limit = 7 - limit = 7 - should_link_exist = False - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_items_less_than_osapi_max_less_than_limit(self): - item_count = 5 - osapi_max_limit = 7 - limit = 8 - should_link_exist = False - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_items_greater_than_osapi_max_no_limit(self): - item_count = 5 - osapi_max_limit = 3 - limit = None - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_limit_less_than_items_greater_than_osapi_max(self): - item_count = 5 - osapi_max_limit = 3 - limit = 2 - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_items_greater_than_osapi_max_equals_limit(self): - item_count = 5 - osapi_max_limit = 3 - limit = 3 - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_items_greater_than_limit_greater_than_osapi_max(self): - item_count = 5 - osapi_max_limit = 3 - limit = 4 - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_items_equals_limit_greater_than_osapi_max(self): - item_count = 5 - osapi_max_limit = 3 - limit = 5 - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - def test_limit_greater_than_items_greater_than_osapi_max(self): - item_count = 5 - osapi_max_limit = 3 - limit = 6 - should_link_exist = True - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - -class LinkPrefixTest(base.TestCase): - def test_update_link_prefix(self): - vb = common.ViewBuilder() - result = vb._update_link_prefix("http://192.168.0.243:24/", - "http://127.0.0.1/volume") - self.assertEqual("http://127.0.0.1/volume", result) - - result = vb._update_link_prefix("http://foo.x.com/v1", - "http://new.prefix.com") - self.assertEqual("http://new.prefix.com/v1", result) - - result = vb._update_link_prefix( - "http://foo.x.com/v1", - "http://new.prefix.com:20455/new_extra_prefix") - self.assertEqual("http://new.prefix.com:20455/new_extra_prefix/v1", - result) - - -class RequestUrlTest(base.TestCase): - def test_get_request_url_no_forward(self): - app_url = 'http://127.0.0.1/v2;param?key=value#frag' - request = type('', (), { - 'application_url': app_url, - 'headers': {} - }) - result = common.get_request_url(request) - self.assertEqual(app_url, result) - - def test_get_request_url_forward(self): - request = type('', (), { - 'application_url': 'http://127.0.0.1/v2;param?key=value#frag', - 'headers': {'X-Forwarded-Host': '192.168.0.243:24'} - }) - result = common.get_request_url(request) - self.assertEqual('http://192.168.0.243:24/v2;param?key=value#frag', - result) diff --git a/karbor/tests/unit/api/v1/__init__.py 
b/karbor/tests/unit/api/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/api/v1/test_copies.py b/karbor/tests/unit/api/v1/test_copies.py deleted file mode 100644 index ebbca70a..00000000 --- a/karbor/tests/unit/api/v1/test_copies.py +++ /dev/null @@ -1,123 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from unittest import mock - -from webob import exc - -from karbor.api.v1 import copies -from karbor import context -from karbor import exception -from karbor.tests import base -from karbor.tests.unit.api import fakes - -PROVIDER_ID_1 = 'efc6a88b-9096-4bb6-8634-cda182a6e12a' -PROVIDER_ID_2 = '3241a88b-9096-4bb6-8634-cda182a6e12a' -DEFAULT_PROJECT_ID = '39bb894794b741e982bd26144d2949f6' -DEFAULT_PLAN_ID = '603b894794b741e982bd26144d2949f6' - - -class FakePlan(object): - def __init__(self, values): - self.id = values.get('id') - self.provider_id = values.get('provider_id') - self.parameters = values.get('parameters') - - -class CopiesApiTest(base.TestCase): - - def setUp(self): - super(CopiesApiTest, self).setUp() - self.controller = copies.CopiesController() - self.ctxt = context.RequestContext('demo', - DEFAULT_PROJECT_ID, True) - - @mock.patch('karbor.objects.Plan.get_by_id') - @mock.patch('karbor.services.protection.api.API.list_checkpoints') - @mock.patch('karbor.services.protection.api.API.copy') - def test_copies_create(self, mock_copy, - mock_list_checkpoints, mock_plan_get): - mock_plan_get.return_value = FakePlan( - {'id': DEFAULT_PLAN_ID, - 'provider_id': PROVIDER_ID_1, - 'parameters': {}}) - mock_list_checkpoints.return_value = ['fake_checkpoint_id'] - copy = self._copy_in_request_body(DEFAULT_PLAN_ID, {}) - body = {"copy": copy} - req = fakes.HTTPRequest.blank('/v1/copies') - self.controller.create(req, PROVIDER_ID_1, body=body) - self.assertEqual(True, mock_copy.called) - - def test_copies_create_with_invalid_provider_id(self): - copy = self._copy_in_request_body(DEFAULT_PLAN_ID, {}) - body = {"copy": copy} - req = fakes.HTTPRequest.blank('/v1/copies') - self.assertRaises(exception.InvalidInput, self.controller.create, req, - 'fake_invalid_provider_id', body=body) - - @mock.patch('karbor.objects.Plan.get_by_id') - def test_copies_create_with_invalid_plan(self, mock_plan_get): - mock_plan_get.side_effect = exception.PlanNotFound - copy = self._copy_in_request_body(DEFAULT_PLAN_ID, {}) - body = {"copy": copy} - req = fakes.HTTPRequest.blank('/v1/copies') - self.assertRaises(exc.HTTPNotFound, self.controller.create, req, - PROVIDER_ID_1, body=body) - - @mock.patch('karbor.objects.Plan.get_by_id') - def test_copies_create_with_different_provider_id(self, mock_plan_get): - mock_plan_get.return_value = FakePlan( - {'id': DEFAULT_PLAN_ID, - 'provider_id': PROVIDER_ID_2, - 'parameters': {}}) - copy = self._copy_in_request_body(DEFAULT_PLAN_ID, {}) - body = {"copy": copy} - req = fakes.HTTPRequest.blank('/v1/copies') - self.assertRaises(exception.InvalidInput, self.controller.create, req, - PROVIDER_ID_1, body=body) - - 
@mock.patch('karbor.objects.Plan.get_by_id') - @mock.patch('karbor.services.protection.api.API.list_checkpoints') - def test_copies_create_with_no_checkpoints_exist( - self, mock_list_checkpoints, mock_plan_get): - mock_plan_get.return_value = FakePlan( - {'id': DEFAULT_PLAN_ID, - 'provider_id': PROVIDER_ID_1, - 'parameters': {}}) - mock_list_checkpoints.return_value = [] - copy = self._copy_in_request_body(DEFAULT_PLAN_ID, {}) - body = {"copy": copy} - req = fakes.HTTPRequest.blank('/v1/copies') - self.assertRaises(exception.InvalidInput, self.controller.create, req, - PROVIDER_ID_1, body=body) - - @mock.patch('karbor.objects.Plan.get_by_id') - @mock.patch('karbor.services.protection.api.API.list_checkpoints') - @mock.patch('karbor.services.protection.api.API.copy') - def test_copies_create_with_protection_copy_failed( - self, mock_copy, mock_list_checkpoints, mock_plan_get): - mock_plan_get.return_value = FakePlan( - {'id': DEFAULT_PLAN_ID, - 'provider_id': PROVIDER_ID_1, - 'parameters': {}}) - mock_list_checkpoints.return_value = ['fake_checkpoint_id'] - mock_copy.side_effect = exception.FlowError - copy = self._copy_in_request_body(DEFAULT_PLAN_ID, {}) - body = {"copy": copy} - req = fakes.HTTPRequest.blank('/v1/copies') - self.assertRaises(exception.FlowError, self.controller.create, - req, PROVIDER_ID_1, body=body) - - def _copy_in_request_body(self, plan_id, parameters): - return { - 'plan_id': plan_id, - 'parameters': parameters - } diff --git a/karbor/tests/unit/api/v1/test_operation_logs.py b/karbor/tests/unit/api/v1/test_operation_logs.py deleted file mode 100644 index 373072a5..00000000 --- a/karbor/tests/unit/api/v1/test_operation_logs.py +++ /dev/null @@ -1,78 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from unittest import mock - -from oslo_config import cfg -from webob import exc - -from karbor.api.v1 import operation_logs -from karbor import context -from karbor.tests import base -from karbor.tests.unit.api import fakes - -CONF = cfg.CONF - - -class OperationLogTest(base.TestCase): - def setUp(self): - super(OperationLogTest, self).setUp() - self.controller = operation_logs.OperationLogsController() - self.ctxt = context.RequestContext('demo', 'fakeproject', True) - - @mock.patch( - 'karbor.api.v1.operation_logs.' - 'OperationLogsController._get_all') - def test_operation_log_list_detail(self, mock_get_all): - req = fakes.HTTPRequest.blank('/v1/operation_logs') - self.controller.index(req) - self.assertTrue(mock_get_all.called) - - @mock.patch( - 'karbor.api.v1.operation_logs.' 
- 'OperationLogsController._get_all') - def test_operation_log_index_limit_offset(self, mock_get_all): - req = fakes.HTTPRequest.blank( - '/v1/operation_logs?limit=2&offset=1') - self.controller.index(req) - self.assertTrue(mock_get_all.called) - - req = fakes.HTTPRequest.blank('/v1/operation_logs?limit=-1&offset=1') - self.assertRaises(exc.HTTPBadRequest, - self.controller.index, - req) - - req = fakes.HTTPRequest.blank('/v1/operation_logs?limit=a&offset=1') - self.assertRaises(exc.HTTPBadRequest, - self.controller.index, - req) - - url = '/v1/operation_logs?limit=2&offset=43543564546567575' - req = fakes.HTTPRequest.blank(url) - self.assertRaises(exc.HTTPBadRequest, - self.controller.index, - req) - - @mock.patch( - 'karbor.api.v1.operation_logs.' - 'OperationLogsController._operation_log_get') - def test_operation_log_show(self, mock_get): - req = fakes.HTTPRequest.blank('/v1/operation_logs') - self.controller.show(req, '2a9ce1f3-cc1a-4516-9435-0ebb13caa398') - self.assertTrue(mock_get.called) - - def test_operation_log_show_Invalid(self): - req = fakes.HTTPRequest.blank('/v1/operation_logs/1') - self.assertRaises( - exc.HTTPBadRequest, self.controller.show, - req, "1") diff --git a/karbor/tests/unit/api/v1/test_plans.py b/karbor/tests/unit/api/v1/test_plans.py deleted file mode 100644 index 32f2458e..00000000 --- a/karbor/tests/unit/api/v1/test_plans.py +++ /dev/null @@ -1,309 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
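
The limit/offset failure cases exercised in the operation-log tests above (negative, non-numeric, and absurdly large values all mapping to 400) come from one shared pagination helper. A rough sketch of that check, assuming max_limit is passed in rather than read from CONF.osapi_max_limit, and with ValueError standing in for webob.exc.HTTPBadRequest:

    def get_pagination_params(params, max_limit=1000):
        try:
            limit = int(params.get('limit', max_limit))
            offset = int(params.get('offset', 0))
        except ValueError:
            raise ValueError('limit and offset must be integers')
        if limit < 0 or offset < 0:
            raise ValueError('limit and offset must be >= 0')
        # The marker passes through untouched; the limit is capped at
        # the deployment-wide maximum.
        return params.get('marker'), min(limit, max_limit), offset
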
- - -from unittest import mock - -from oslo_config import cfg -from webob import exc - -from karbor.api.v1 import plans -from karbor.common import constants -from karbor import context -from karbor import exception -from karbor.tests import base -from karbor.tests.unit.api import fakes - -CONF = cfg.CONF - -DEFAULT_NAME = 'My 3 tier application' -DEFAULT_DESCRIPTION = 'My 3 tier application protection plan' -DEFAULT_PROVIDER_ID = 'efc6a88b-9096-4bb6-8634-cda182a6e12a' -DEFAULT_PROJECT_ID = '39bb894794b741e982bd26144d2949f6' -DEFAULT_RESOURCES = [{'id': 'efc6a88b-9096-4bb6-8634-cda182a6e144', - "type": "OS::Cinder::Volume", "name": "name1"}] -DEFAULT_PARAMETERS = {"OS::Cinder::Volume": {"backup_name": "name"}} - - -class PlanApiTest(base.TestCase): - def setUp(self): - super(PlanApiTest, self).setUp() - self.controller = plans.PlansController() - self.ctxt = context.RequestContext('demo', - DEFAULT_PROJECT_ID, True) - - @mock.patch( - 'karbor.services.protection.rpcapi.ProtectionAPI.show_provider') - @mock.patch( - 'karbor.objects.plan.Plan.create') - def test_plan_create(self, mock_plan_create, mock_provider): - plan = self._plan_in_request_body() - body = {"plan": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - mock_provider.return_value = fakes.PROVIDER_OS - self.controller.create(req, body=body) - self.assertTrue(mock_plan_create.called) - - def test_plan_create_InvalidBody(self): - plan = self._plan_in_request_body() - body = {"planxx": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - self.assertRaises(exception.ValidationError, self.controller.create, - req, body=body) - - def test_plan_create_InvalidProviderId(self): - plan = self._plan_in_request_body( - name=DEFAULT_NAME, - description=DEFAULT_DESCRIPTION, - provider_id="", - resources=[]) - body = {"plan": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - self.assertRaises(exception.ValidationError, self.controller.create, - req, body=body) - - def test_plan_create_InvalidResources(self): - plan = self._plan_in_request_body( - name=DEFAULT_NAME, - description=DEFAULT_DESCRIPTION, - provider_id=DEFAULT_PROVIDER_ID, - resources=[]) - body = {"plan": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - self.assertRaises(exception.InvalidInput, self.controller.create, - req, body=body) - - @mock.patch( - 'karbor.services.protection.rpcapi.ProtectionAPI.show_provider') - def test_plan_create_InvalidParameters(self, mock_provider): - parameters = {"OS::Cinder::Volume": {"test": "os"}} - plan = self._plan_in_request_body( - name=DEFAULT_NAME, - description=DEFAULT_DESCRIPTION, - provider_id=DEFAULT_PROVIDER_ID, - parameters=parameters) - body = {"plan": plan} - mock_provider.return_value = fakes.PROVIDER_OS - req = fakes.HTTPRequest.blank('/v1/plans') - self.assertRaises(exc.HTTPBadRequest, self.controller.create, - req, body=body) - - @mock.patch( - 'karbor.services.protection.rpcapi.ProtectionAPI.show_provider') - def test_plan_create_InvalidProvider(self, mock_provider): - plan = self._plan_in_request_body() - body = {"plan": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - mock_provider.side_effect = exception.NotFound() - self.assertRaises(exc.HTTPBadRequest, self.controller.create, - req, body=body) - - @mock.patch( - 'karbor.services.protection.rpcapi.ProtectionAPI.show_provider') - def test_plan_create_InvalidProvider_and_no_parameters_specify( - self, mock_provider): - plan = self._plan_in_request_body(parameters={}) - body = {"plan": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - mock_provider.side_effect 
= exception.NotFound() - self.assertRaises(exc.HTTPBadRequest, self.controller.create, - req, body=body) - - @mock.patch( - 'karbor.api.v1.plans.PlansController._plan_get') - @mock.patch( - 'karbor.api.v1.plans.PlansController._plan_update') - def test_plan_update(self, mock_plan_update, mock_plan_get): - plan = self._plan_update_request_body() - body = {"plan": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - self.controller.update( - req, "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", body=body) - self.assertTrue(mock_plan_update.called) - self.assertTrue(mock_plan_get.called) - - def test_plan_update_InvalidBody(self): - plan = self._plan_update_request_body() - body = {"planxx": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - self.assertRaises( - exception.ValidationError, self.controller.update, - req, "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", body=body) - body = {"plan": {}} - self.assertRaises( - exc.HTTPBadRequest, self.controller.update, - req, "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", body=body) - - def test_plan_update_InvalidId(self): - plan = self._plan_update_request_body() - body = {"plan": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - self.assertRaises( - exc.HTTPNotFound, self.controller.update, - req, "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", body=body) - - def test_plan_update_InvalidResources(self): - plan = self._plan_update_request_body( - name=DEFAULT_NAME, - resources=[{'key1': 'value1'}]) - body = {"plan": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - self.assertRaises( - exception.InvalidInput, self.controller.update, - req, "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", body=body) - - @mock.patch( - 'karbor.api.v1.plans.PlansController._get_all') - def test_plan_list_detail(self, mock_get_all): - req = fakes.HTTPRequest.blank('/v1/plans') - self.controller.index(req) - self.assertTrue(mock_get_all.called) - - @mock.patch( - 'karbor.api.v1.plans.PlansController._get_all') - def test_plan_index_limit_offset(self, mock_get_all): - req = fakes.HTTPRequest.blank('/v1/plans?limit=2&offset=1') - self.controller.index(req) - self.assertTrue(mock_get_all.called) - - req = fakes.HTTPRequest.blank('/v1/plans?limit=-1&offset=1') - self.assertRaises(exc.HTTPBadRequest, - self.controller.index, - req) - - req = fakes.HTTPRequest.blank('/v1/plans?limit=a&offset=1') - self.assertRaises(exc.HTTPBadRequest, - self.controller.index, - req) - - url = '/v1/plans?limit=2&offset=43543564546567575' - req = fakes.HTTPRequest.blank(url) - self.assertRaises(exc.HTTPBadRequest, - self.controller.index, - req) - - @mock.patch( - 'karbor.services.protection.rpcapi.ProtectionAPI.show_provider') - def test_plan_create_empty_dict(self, mock_provider): - mock_provider.return_value = fakes.PROVIDER_OS - plan = self._plan_in_request_body(parameters={}) - body = {"plan": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - self.controller.create(req, body=body) - - @mock.patch( - 'karbor.api.v1.plans.PlansController._plan_get') - def test_plan_show(self, mock_plan_get): - req = fakes.HTTPRequest.blank('/v1/plans') - self.controller.show(req, '2a9ce1f3-cc1a-4516-9435-0ebb13caa398') - self.assertTrue(mock_plan_get.called) - - def test_plan_show_Invalid(self): - req = fakes.HTTPRequest.blank('/v1/plans/1') - self.assertRaises( - exc.HTTPBadRequest, self.controller.show, - req, "1") - - def test_plan_show_InvalidPlanId(self): - req = fakes.HTTPRequest.blank('/v1/plans') - self.assertRaises( - exc.HTTPNotFound, self.controller.show, - req, "2a9ce1f3-cc1a-4516-9435-0ebb13caa398") - - @mock.patch( - 
'karbor.api.v1.plans.PlansController._plan_get') - def test_plan_delete(self, mock_plan_get): - req = fakes.HTTPRequest.blank('/v1/plans') - self.controller.delete(req, '2a9ce1f3-cc1a-4516-9435-0ebb13caa398') - self.assertTrue(mock_plan_get.called) - - def test_plan_delete_Invalid(self): - req = fakes.HTTPRequest.blank('/v1/plans/1') - self.assertRaises( - exc.HTTPBadRequest, self.controller.delete, - req, "1") - - def test_plan_delete_InvalidPlanId(self): - req = fakes.HTTPRequest.blank('/v1/plans') - self.assertRaises( - exc.HTTPNotFound, self.controller.delete, - req, "2a9ce1f3-cc1a-4516-9435-0ebb13caa398") - - @mock.patch( - 'karbor.api.v1.plans.PlansController._plan_get') - def test_plan_delete_authorize_failed( - self, mock_plan_get): - plan = self._plan_in_request_body() - plan['project_id'] = DEFAULT_PROJECT_ID - req = fakes.HTTPRequest.blank('/v1/plans') - mock_plan_get.return_value = plan - self.assertRaises(exception.PolicyNotAuthorized, - self.controller.delete, req, - "2a9ce1f3-cc1a-4516-9435-0ebb13caa398") - - @mock.patch( - 'karbor.api.v1.plans.PlansController._plan_get') - def test_plan_update_InvalidStatus( - self, mock_plan_get): - plan = self._plan_update_request_body( - name=DEFAULT_NAME, - status=constants.PLAN_STATUS_STARTED, - resources=DEFAULT_RESOURCES) - body = {"plan": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - mock_plan_get.return_value = plan - self.assertRaises(exception.InvalidPlan, - self.controller.update, req, - "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - body=body) - - @mock.patch( - 'karbor.api.v1.plans.PlansController._plan_get') - def test_plan_update_InvalidInput( - self, mock_plan_get): - plan = self._plan_update_request_body( - name=DEFAULT_NAME, - status=constants.PLAN_STATUS_SUSPENDED, - resources=DEFAULT_RESOURCES) - body = {"plan": plan} - req = fakes.HTTPRequest.blank('/v1/plans') - mock_plan_get.return_value = plan - self.assertRaises(exception.InvalidInput, - self.controller.update, req, - "2a9ce1f3-cc1a-4516-9435-0ebb13caa398", - body=body) - - def _plan_in_request_body(self, name=DEFAULT_NAME, - description=DEFAULT_DESCRIPTION, - provider_id=DEFAULT_PROVIDER_ID, - resources=DEFAULT_RESOURCES, - parameters=DEFAULT_PARAMETERS): - plan_req = { - 'name': name, - 'description': description, - 'provider_id': provider_id, - 'resources': resources, - 'parameters': parameters, - } - - return plan_req - - def _plan_update_request_body(self, name=DEFAULT_NAME, - status=constants.PLAN_STATUS_STARTED, - resources=DEFAULT_RESOURCES): - plan_req = { - 'name': name, - 'resources': resources, - 'status': status, - } - - return plan_req diff --git a/karbor/tests/unit/api/v1/test_protectables.py b/karbor/tests/unit/api/v1/test_protectables.py deleted file mode 100644 index 0679b872..00000000 --- a/karbor/tests/unit/api/v1/test_protectables.py +++ /dev/null @@ -1,178 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
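
Nearly every case in the plan tests above follows the same shape: patch the object or RPC layer, drive the controller, and assert the stub was reached rather than inspecting a response body. The pattern in isolation, against hypothetical Service and Controller classes:

    import unittest
    from unittest import mock

    class Service(object):
        def create(self, name):
            raise NotImplementedError  # stands in for an RPC call

    class Controller(object):
        def __init__(self, service):
            self.service = service

        def create(self, name):
            return self.service.create(name)

    class ControllerTest(unittest.TestCase):
        def test_create_delegates(self):
            # Stub the backend, drive the controller, assert the stub
            # was called once with the expected argument.
            with mock.patch.object(Service, 'create',
                                   return_value='ok') as fake_create:
                self.assertEqual('ok', Controller(Service()).create('p1'))
                fake_create.assert_called_once_with('p1')
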
- - -from unittest import mock - -from oslo_config import cfg -from webob import exc - -from karbor.api.v1 import protectables -from karbor import context -from karbor import exception -from karbor.tests import base -from karbor.tests.unit.api import fakes - -CONF = cfg.CONF - - -class ProtectablesApiTest(base.TestCase): - def setUp(self): - super(ProtectablesApiTest, self).setUp() - self.controller = protectables.ProtectablesController() - self.ctxt = context.RequestContext('demo', 'fakeproject', True) - - @mock.patch( - 'karbor.api.v1.protectables.ProtectablesController._get_all') - def test_protectables_list_detail(self, mock_get_all): - req = fakes.HTTPRequest.blank('/v1/protectables') - self.controller.index(req) - self.assertTrue(mock_get_all.called) - - @mock.patch( - 'karbor.services.protection.api.API.show_protectable_type') - @mock.patch( - 'karbor.api.v1.protectables.ProtectablesController._get_all') - def test_protectables_show( - self, mock_get_all, mock_show_protectable_type): - req = fakes.HTTPRequest.blank('/v1/protectables') - mock_get_all.return_value = ["OS::Keystone::Project"] - self.controller.show(req, 'OS::Keystone::Project') - self.assertTrue(mock_get_all.called) - self.assertTrue(mock_show_protectable_type.called) - - @mock.patch( - 'karbor.api.v1.protectables.ProtectablesController._get_all') - def test_protectables_show_Invalid(self, mock_get_all): - req = fakes.HTTPRequest.blank('/v1/protectables') - mock_get_all.return_value = ["OS::Keystone::Project"] - self.assertRaises(exception.InvalidInput, self.controller.show, - req, "1") - self.assertTrue(mock_get_all.called) - - @mock.patch( - 'karbor.services.protection.api.API.' - 'list_protectable_instances') - @mock.patch( - 'karbor.api.v1.protectables.ProtectablesController._get_all') - def test_protectables_instances_index(self, mock_get_all, - mock_list_protectable_instances): - req = fakes.HTTPRequest.blank('/v1/protectables') - mock_get_all.return_value = ["OS::Keystone::Project"] - self.controller.instances_index(req, 'OS::Keystone::Project') - self.assertTrue(mock_get_all.called) - self.assertTrue(mock_list_protectable_instances.called) - - @mock.patch( - 'karbor.api.v1.protectables.ProtectablesController._get_all') - def test_protectables_instances_index_Invalid(self, mock_get_all): - req = fakes.HTTPRequest.blank('/v1/protectables') - mock_get_all.return_value = ["OS::Keystone::Project"] - self.assertRaises(exception.InvalidInput, - self.controller.instances_index, - req, 'abc') - - @mock.patch( - 'karbor.api.v1.protectables.ProtectablesController._get_all') - def test_protectables_instances_index_InvalidPara(self, mock_get_all): - req = fakes.HTTPRequest.blank('/v1/protectables?parameters=abc') - mock_get_all.return_value = ["OS::Keystone::Project"] - self.assertRaises(exception.InvalidInput, - self.controller.instances_index, - req, 'OS::Keystone::Project') - - @mock.patch( - 'karbor.services.protection.api.API.' - 'list_protectable_instances') - @mock.patch( - 'karbor.api.v1.protectables.ProtectablesController._get_all') - def test_protectables_instances_index_InvalidInstance( - self, mock_get_all, - mock_list_protectable_instances): - req = fakes.HTTPRequest.blank('/v1/protectables') - mock_get_all.return_value = ["OS::Keystone::Project"] - mock_list_protectable_instances.return_value = [{"name": "abc"}] - self.assertRaises(exception.InvalidProtectableInstance, - self.controller.instances_index, - req, 'OS::Keystone::Project') - - @mock.patch( - 'karbor.services.protection.api.API.' 
-        'list_protectable_dependents')
-    @mock.patch(
-        'karbor.services.protection.api.API.'
-        'show_protectable_instance')
-    @mock.patch(
-        'karbor.api.v1.protectables.ProtectablesController._get_all')
-    def test_protectables_instances_show(self, mock_get_all,
-                                         mock_show_protectable_instance,
-                                         mock_list_protectable_dependents):
-        req = fakes.HTTPRequest.blank('/v1/protectables')
-        mock_get_all.return_value = ["OS::Keystone::Project"]
-        self.controller.instances_show(
-            req,
-            'OS::Keystone::Project',
-            'efc6a88b-9096-4bb6-8634-cda182a6e12a',
-        )
-        self.assertTrue(mock_get_all.called)
-        self.assertTrue(mock_show_protectable_instance.called)
-        self.assertTrue(mock_list_protectable_dependents.called)
-
-    def test_protectables_instances_show_InvalidParam(self):
-        req = fakes.HTTPRequest.blank('/v1/protectables?parameters=abc')
-        self.assertRaises(exception.InvalidInput,
-                          self.controller.instances_show,
-                          req,
-                          'OS::Keystone::Project',
-                          'efc6a88b-9096-4bb6-8634-cda182a6e12a')
-
-    @mock.patch(
-        'karbor.api.v1.protectables.ProtectablesController._get_all')
-    def test_protectables_instances_show_InvalidType(self, mock_get_all):
-        req = fakes.HTTPRequest.blank('/v1/protectables')
-        mock_get_all.return_value = ["OS::Keystone::Project"]
-        self.assertRaises(exception.InvalidInput,
-                          self.controller.instances_show,
-                          req,
-                          'abc',
-                          'efc6a88b-9096-4bb6-8634-cda182a6e12a')
-
-    @mock.patch(
-        'karbor.services.protection.api.API.'
-        'show_protectable_instance')
-    @mock.patch(
-        'karbor.api.v1.protectables.ProtectablesController._get_all')
-    def test_protectables_instances_show_Invalid(
-            self,
-            mock_get_all,
-            mock_show_protectable_instance):
-        req = fakes.HTTPRequest.blank('/v1/protectables')
-        mock_get_all.return_value = ["OS::Keystone::Project"]
-        mock_show_protectable_instance.side_effect = \
-            exception.ProtectableResourceNotFound
-        self.assertRaises(exc.HTTPNotFound,
-                          self.controller.instances_show,
-                          req,
-                          'OS::Keystone::Project',
-                          'efc6a88b-9096-4bb6-8634-cda182a6e12a')
-        mock_show_protectable_instance.side_effect = exception.KarborException
-        self.assertRaises(exc.HTTPInternalServerError,
-                          self.controller.instances_show,
-                          req,
-                          'OS::Keystone::Project',
-                          'efc6a88b-9096-4bb6-8634-cda182a6e12a')
-        mock_show_protectable_instance.return_value = None
-        self.assertRaises(exc.HTTPInternalServerError,
-                          self.controller.instances_show,
-                          req,
-                          'OS::Keystone::Project',
-                          'efc6a88b-9096-4bb6-8634-cda182a6e12a')
diff --git a/karbor/tests/unit/api/v1/test_providers.py b/karbor/tests/unit/api/v1/test_providers.py
deleted file mode 100644
index eabfd7ec..00000000
--- a/karbor/tests/unit/api/v1/test_providers.py
+++ /dev/null
@@ -1,293 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from unittest import mock
-
-from oslo_config import cfg
-from webob import exc
-
-from karbor.api.v1 import providers
-from karbor import context
-from karbor import exception
-from karbor.tests import base
-from karbor.tests.unit.api import fakes
-
-CONF = cfg.CONF
-
-
-class ProvidersApiTest(base.TestCase):
-    def setUp(self):
-        super(ProvidersApiTest, self).setUp()
-        self.controller = providers.ProvidersController()
-        self.ctxt = context.RequestContext('demo', 'fakeproject', True)
-
-    @mock.patch(
-        'karbor.api.v1.providers.ProvidersController._get_all')
-    def test_providers_list_detail(self, mock_get_all):
-        req = fakes.HTTPRequest.blank('/v1/providers')
-        self.controller.index(req)
-        self.assertTrue(mock_get_all.called)
-
-    @mock.patch(
-        'karbor.services.protection.api.API.show_provider')
-    def test_providers_show(self, mock_show_provider):
-        req = fakes.HTTPRequest.blank('/v1/providers')
-        self.controller.show(req, '2220f8b1-975d-4621-a872-fa9afb43cb6c')
-        self.assertTrue(mock_show_provider.called)
-
-    def test_providers_show_Invalid(self):
-        req = fakes.HTTPRequest.blank('/v1/providers')
-        self.assertRaises(exc.HTTPBadRequest, self.controller.show,
-                          req, "1")
-
-    @mock.patch(
-        'karbor.services.protection.api.API.'
-        'show_checkpoint')
-    def test_checkpoint_show(self, mock_show_checkpoint):
-        req = fakes.HTTPRequest.blank('/v1/providers/'
-                                      '{provider_id}/checkpoints/')
-        mock_show_checkpoint.return_value = {
-            "provider_id": "efc6a88b-9096-4bb6-8634-cda182a6e12a",
-            "project_id": "446a04d8-6ff5-4e0e-99a4-827a6389e9ff",
-            "id": "2220f8b1-975d-4621-a872-fa9afb43cb6c"
-        }
-        self.controller.checkpoints_show(
-            req,
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c'
-        )
-        self.assertTrue(mock_show_checkpoint.called)
-
-    @mock.patch(
-        'karbor.services.protection.api.API.'
-        'show_checkpoint')
-    def test_checkpoint_show_Invalid(self, mock_show_checkpoint):
-        req = fakes.HTTPRequest.blank('/v1/providers/'
-                                      '{provider_id}/checkpoints/')
-        mock_show_checkpoint.return_value = {
-            "provider_id": "efc6a88b-9096-4bb6-8634-cda182a6e12a",
-            "project_id": "446a04d8-6ff5-4e0e-99a4-827a6389e9ff",
-            "id": "2220f8b1-975d-4621-a872-fa9afb43cb6c"
-        }
-        self.assertRaises(exc.HTTPBadRequest, self.controller.checkpoints_show,
-                          req, '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-                          '1')
-
-    @mock.patch(
-        'karbor.services.protection.api.API.'
-        'list_checkpoints')
-    def test_checkpoint_index(self, mock_list_checkpoints):
-        req = fakes.HTTPRequest.blank('/v1/providers/'
-                                      '{provider_id}/checkpoints/')
-        mock_list_checkpoints.return_value = [
-            {
-                "provider_id": "efc6a88b-9096-4bb6-8634-cda182a6e12a",
-                "project_id": "446a04d8-6ff5-4e0e-99a4-827a6389e9ff",
-                "id": "2220f8b1-975d-4621-a872-fa9afb43cb6c"
-            }
-        ]
-        self.controller.checkpoints_index(
-            req,
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c')
-        self.assertTrue(mock_list_checkpoints.called)
-
-    @mock.patch('karbor.quota.QuotaEngine.commit')
-    @mock.patch('karbor.quota.QuotaEngine.reserve')
-    @mock.patch('karbor.services.protection.api.API.show_checkpoint')
-    @mock.patch('karbor.services.protection.api.API.delete')
-    def test_checkpoints_delete(self, mock_delete, mock_show_checkpoint,
-                                mock_reserve, mock_commit):
-        req = fakes.HTTPRequest.blank('/v1/providers/'
-                                      '{provider_id}/checkpoints/')
-        mock_show_checkpoint.return_value = {
-            "provider_id": "efc6a88b-9096-4bb6-8634-cda182a6e12a",
-            "project_id": "446a04d8-6ff5-4e0e-99a4-827a6389e9ff",
-            "id": "2220f8b1-975d-4621-a872-fa9afb43cb6c"
-        }
-        self.controller.checkpoints_delete(
-            req,
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c')
-        self.assertTrue(mock_delete.called)
-        self.assertTrue(mock_reserve.called)
-        self.assertTrue(mock_commit.called)
-
-    @mock.patch('karbor.quota.QuotaEngine.commit')
-    @mock.patch('karbor.quota.QuotaEngine.reserve')
-    @mock.patch('karbor.services.protection.api.API.protect')
-    @mock.patch('karbor.objects.plan.Plan.get_by_id')
-    def test_checkpoints_create(self, mock_plan_get, mock_protect,
-                                mock_reserve, mock_commit):
-        checkpoint = {
-            "plan_id": "2c3a12ee-5ea6-406a-8b64-862711ff85e6"
-        }
-        body = {"checkpoint": checkpoint}
-        req = fakes.HTTPRequest.blank('/v1/providers/'
-                                      '{provider_id}/checkpoints/')
-        mock_plan_get.return_value = {
-            "plan_id": "2c3a12ee-5ea6-406a-8b64-862711ff85e6",
-            "provider_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c"
-        }
-        mock_protect.return_value = {
-            "checkpoint_id": "2c3a12ee-5ea6-406a-8b64-862711ff85e6"
-        }
-        self.controller.checkpoints_create(
-            req,
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            body=body)
-        self.assertTrue(mock_plan_get.called)
-        self.assertTrue(mock_reserve.called)
-        self.assertTrue(mock_protect.called)
-        self.assertTrue(mock_commit.called)
-
-    @mock.patch('karbor.quota.process_reserve_over_quota')
-    @mock.patch('karbor.quota.QuotaEngine.reserve')
-    @mock.patch('karbor.services.protection.api.API.protect')
-    @mock.patch('karbor.objects.plan.Plan.get_by_id')
-    def test_checkpoints_create_with_over_quota_exception(
-            self, mock_plan_get, mock_protect, mock_quota_reserve,
-            mock_process_reserve_over_quota):
-        checkpoint = {"plan_id": "2c3a12ee-5ea6-406a-8b64-862711ff85e6"}
-        body = {"checkpoint": checkpoint}
-        req = fakes.HTTPRequest.blank('/v1/providers/'
-                                      '{provider_id}/checkpoints/')
-        mock_plan_get.return_value = {
-            "plan_id": "2c3a12ee-5ea6-406a-8b64-862711ff85e6",
-            "provider_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c"
-        }
-        mock_protect.return_value = {
-            "checkpoint_id": "2c3a12ee-5ea6-406a-8b64-862711ff85e6"
-        }
-        mock_quota_reserve.side_effect = exception.OverQuota
-        self.controller.checkpoints_create(
-            req,
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            body=body)
-        self.assertTrue(mock_process_reserve_over_quota.called)
-
-    @mock.patch('karbor.quota.QuotaEngine.rollback')
-    @mock.patch('karbor.services.protection.api.API.protect')
-    @mock.patch('karbor.objects.plan.Plan.get_by_id')
-    def test_checkpoint_create_failed_with_protection_exception(
-            self, mock_plan_get, mock_protect, mock_quota_rollback):
-        checkpoint = {"plan_id": "2c3a12ee-5ea6-406a-8b64-862711ff85e6"}
-        body = {"checkpoint": checkpoint}
-        req = fakes.HTTPRequest.blank('/v1/providers/'
-                                      '{provider_id}/checkpoints/')
-        mock_plan_get.return_value = {
-            "plan_id": "2c3a12ee-5ea6-406a-8b64-862711ff85e6",
-            "provider_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c"
-        }
-        mock_protect.side_effect = Exception
-        self.assertRaises(
-            exc.HTTPBadRequest,
-            self.controller.checkpoints_create,
-            req,
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            body=body)
-        self.assertTrue(mock_quota_rollback.called)
-
-    @mock.patch('karbor.services.protection.api.API.reset_state')
-    def test_checkpoints_update_reset_state(self, mock_reset_state):
-        req = fakes.HTTPRequest.blank('/v1/providers/{provider_id}/'
-                                      'checkpoints/{checkpoint_id}')
-        body = {
-            'os-resetState': {'state': 'error'}
-        }
-        self.controller.checkpoints_update(
-            req,
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            body=body)
-        self.assertTrue(mock_reset_state.called)
-
-    def test_checkpoints_update_reset_state_with_invalid_provider_id(self):
-        req = fakes.HTTPRequest.blank('/v1/providers/{provider_id}/'
-                                      'checkpoints/{checkpoint_id}')
-        body = {
-            'os-resetState': {'state': 'error'}
-        }
-        self.assertRaises(
-            exc.HTTPBadRequest,
-            self.controller.checkpoints_update,
-            req,
-            'invalid_provider_id',
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            body=body)
-
-    def test_checkpoints_update_reset_state_with_invalid_checkpoint_id(self):
-        req = fakes.HTTPRequest.blank('/v1/providers/{provider_id}/'
-                                      'checkpoints/{checkpoint_id}')
-        body = {
-            'os-resetState': {'state': 'error'}
-        }
-        self.assertRaises(
-            exc.HTTPBadRequest,
-            self.controller.checkpoints_update,
-            req,
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            'invalid_checkpoint_id',
-            body=body)
-
-    def test_checkpoints_update_reset_state_with_invalid_body(self):
-        req = fakes.HTTPRequest.blank('/v1/providers/{provider_id}/'
-                                      'checkpoints/{checkpoint_id}')
-        self.assertRaises(
-            exception.ValidationError,
-            self.controller.checkpoints_update,
-            req,
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            body={})
-        self.assertRaises(
-            exception.ValidationError,
-            self.controller.checkpoints_update,
-            req,
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-            body={'os-resetState': {'state': 'invalid_state'}})
-
-    @mock.patch('karbor.services.protection.api.API.reset_state')
-    def test_checkpoints_update_reset_state_with_protection_api_exceptions(
-            self, mock_reset_state):
-        req = fakes.HTTPRequest.blank('/v1/providers/{provider_id}/'
-                                      'checkpoints/{checkpoint_id}')
-        body = {
-            'os-resetState': {'state': 'error'}
-        }
-        mock_reset_state.side_effect = exception.AccessCheckpointNotAllowed(
-            checkpoint_id='2220f8b1-975d-4621-a872-fa9afb43cb6c')
-        self.assertRaises(exc.HTTPForbidden,
-                          self.controller.checkpoints_update,
-                          req,
-                          '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-                          '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-                          body=body)
-
-        mock_reset_state.side_effect = exception.CheckpointNotFound(
-            checkpoint_id='2220f8b1-975d-4621-a872-fa9afb43cb6c')
-        self.assertRaises(exc.HTTPNotFound,
-                          self.controller.checkpoints_update,
-                          req,
-                          '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-                          '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-                          body=body)
-
-        mock_reset_state.side_effect = exception.CheckpointNotBeReset(
-            checkpoint_id='2220f8b1-975d-4621-a872-fa9afb43cb6c')
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller.checkpoints_update,
-                          req,
-                          '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-                          '2220f8b1-975d-4621-a872-fa9afb43cb6c',
-                          body=body)
diff --git a/karbor/tests/unit/api/v1/test_quota_classes.py b/karbor/tests/unit/api/v1/test_quota_classes.py
deleted file mode 100644
index ef0e8267..00000000
--- a/karbor/tests/unit/api/v1/test_quota_classes.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from unittest import mock
-
-from oslo_config import cfg
-from webob import exc
-
-from karbor.api.v1 import quota_classes
-from karbor import context
-from karbor import exception
-from karbor.tests import base
-from karbor.tests.unit.api import fakes
-
-CONF = cfg.CONF
-
-
-class QuotaClassApiTest(base.TestCase):
-    def setUp(self):
-        super(QuotaClassApiTest, self).setUp()
-        self.controller = quota_classes.QuotaClassesController()
-        self.ctxt = context.RequestContext('demo', 'fakeproject', True)
-
-    @mock.patch(
-        'karbor.db.sqlalchemy.api.quota_class_update')
-    def test_quota_update(self, mock_quota_update):
-        quota_class = self._quota_in_request_body()
-        body = {"quota_class": quota_class}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quota_classes/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.controller.update(
-            req, '73f74f90a1754bd7ad658afb3272323f', body=body)
-        self.assertTrue(mock_quota_update.called)
-
-    @mock.patch(
-        'karbor.db.sqlalchemy.api.quota_class_create')
-    @mock.patch(
-        'karbor.db.sqlalchemy.api.quota_class_update')
-    def test_quota_update_with_quota_not_found(self,
-                                               mock_quota_class_update,
-                                               mock_quota_class_create):
-        body = {"quota_class": {"plans": 20}}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas_classes/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        mock_quota_class_update.side_effect = exception.QuotaClassNotFound
-        self.controller.update(
-            req, '73f74f90a1754bd7ad658afb3272323f', body=body)
-        self.assertTrue(mock_quota_class_create.called)
-
-    def test_quota_update_with_invalid_type_value(self):
-        body = {"quota_class": {"plans": "fakevalue"}}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quota_classes/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
-                          req, "73f74f90a1754bd7ad658afb3272323f", body=body)
-
-    def test_quota_update_with_invalid_num_value(self):
-        body = {"quota_class": {"plans": -2}}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quota_classes/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
-                          req, "73f74f90a1754bd7ad658afb3272323f", body=body)
-
-    @mock.patch(
-        'karbor.db.sqlalchemy.api.quota_class_update')
-    def test_quota_update_with_invalid_key(self, mock_quota_class_update):
-        body = {"quota_class": {"fakekey": 20}}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quota_classes/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.controller.update(
-            req, '73f74f90a1754bd7ad658afb3272323f', body=body)
-        self.assertEqual(0,
-                         len(mock_quota_class_update.mock_calls))
-
-    @mock.patch(
-        'karbor.quota.DbQuotaDriver.get_class_quotas')
-    def test_quota_show(self, mock_quota_get):
-        req = fakes.HTTPRequest.blank(
-            '/v1/quota_classes/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.controller.show(
-            req, '73f74f90a1754bd7ad658afb3272323f')
-        self.assertTrue(mock_quota_get.called)
-
-    def test_quota_show_with_project_authorize_failed(self):
-        req = fakes.HTTPRequest.blank(
-            '/v1/quota_classes/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=False)
-        self.assertRaises(
-            exc.HTTPForbidden, self.controller.show,
-            req, '73f74f90a1754bd7ad658afb3272323f')
-
-    def _quota_in_request_body(self):
-        quota_req = {
-            "plans": 20,
-        }
-        return quota_req
diff --git a/karbor/tests/unit/api/v1/test_quotas.py b/karbor/tests/unit/api/v1/test_quotas.py
deleted file mode 100644
index bb6e56f7..00000000
--- a/karbor/tests/unit/api/v1/test_quotas.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from unittest import mock
-
-from oslo_config import cfg
-from webob import exc
-
-from karbor.api.v1 import quotas
-from karbor import context
-from karbor import exception
-from karbor.tests import base
-from karbor.tests.unit.api import fakes
-
-CONF = cfg.CONF
-INVALID_PROJECT_ID = '111'
-
-
-class QuotaApiTest(base.TestCase):
-    def setUp(self):
-        super(QuotaApiTest, self).setUp()
-        self.controller = quotas.QuotasController()
-        self.ctxt = context.RequestContext('demo', 'fakeproject', True)
-
-    @mock.patch(
-        'karbor.db.sqlalchemy.api.quota_update')
-    def test_quota_update(self, mock_quota_update):
-        quota = self._quota_in_request_body()
-        body = {"quota": quota}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.controller.update(
-            req, '73f74f90a1754bd7ad658afb3272323f', body=body)
-        self.assertTrue(mock_quota_update.called)
-
-    def test_quota_update_with_invalid_project_id(self):
-        quota = self._quota_in_request_body()
-        body = {"quota": quota}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/111', use_admin_context=True)
-        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
-                          req, INVALID_PROJECT_ID, body=body)
-
-    def test_quota_update_with_invalid_type_value(self):
-        body = {"quota": {"plans": "fakevalue"}}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
-                          req, "73f74f90a1754bd7ad658afb3272323f", body=body)
-
-    def test_quota_update_with_invalid_num_value(self):
-        body = {"quota": {"plans": -2}}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
-                          req, "73f74f90a1754bd7ad658afb3272323f", body=body)
-
-    @mock.patch(
-        'karbor.db.sqlalchemy.api.quota_update')
-    def test_quota_update_with_zero_value(self, mock_quota_update):
-        body = {"quota": {"plans": 0}}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.controller.update(
-            req, '73f74f90a1754bd7ad658afb3272323f', body=body)
-        self.assertTrue(mock_quota_update.called)
-
-    @mock.patch(
-        'karbor.db.sqlalchemy.api.quota_update')
-    def test_quota_update_with_invalid_key(self, mock_quota_update):
-        body = {"quota": {"fakekey": 20}}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.controller.update(
-            req, '73f74f90a1754bd7ad658afb3272323f', body=body)
-        self.assertEqual(0,
-                         len(mock_quota_update.mock_calls))
-
-    @mock.patch(
-        'karbor.db.sqlalchemy.api.quota_create')
-    @mock.patch(
-        'karbor.db.sqlalchemy.api.quota_update')
-    def test_quota_update_with_project_quota_not_found(self,
-                                                       mock_quota_update,
-                                                       mock_quota_create):
-        body = {"quota": {"plans": 20}}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        mock_quota_update.side_effect = exception.ProjectQuotaNotFound
-        self.controller.update(
-            req, '73f74f90a1754bd7ad658afb3272323f', body=body)
-        self.assertTrue(mock_quota_create.called)
-
-    def test_quota_update_with_not_admin_context(self):
-        body = {"quota": {"plans": 20}}
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=False)
-        self.assertRaises(
-            exception.PolicyNotAuthorized, self.controller.update,
-            req, "73f74f90a1754bd7ad658afb3272323f", body=body)
-
-    @mock.patch(
-        'karbor.quota.DbQuotaDriver.get_defaults')
-    def test_quota_defaults(self, mock_quota_get):
-        req = fakes.HTTPRequest.blank(
-            'v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.controller.defaults(
-            req, '73f74f90a1754bd7ad658afb3272323f')
-        self.assertTrue(mock_quota_get.called)
-
-    def test_quota_defaults_with_invalid_project_id(self):
-        req = fakes.HTTPRequest.blank('/v1/quotas/111',
-                                      use_admin_context=True)
-        self.assertRaises(
-            exc.HTTPBadRequest, self.controller.defaults,
-            req, INVALID_PROJECT_ID)
-
-    @mock.patch(
-        'karbor.quota.DbQuotaDriver.get_project_quotas')
-    def test_quota_detail(self, mock_quota_get):
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.controller.detail(
-            req, '73f74f90a1754bd7ad658afb3272323f')
-        self.assertTrue(mock_quota_get.called)
-
-    def test_quota_detail_with_invalid_project_id(self):
-        req = fakes.HTTPRequest.blank('/v1/quotas/111',
-                                      use_admin_context=True)
-        self.assertRaises(
-            exc.HTTPBadRequest, self.controller.detail,
-            req, INVALID_PROJECT_ID)
-
-    def test_quota_detail_with_project_authorize_failed(self):
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=False)
-        self.assertRaises(
-            exc.HTTPForbidden, self.controller.detail,
-            req, '73f74f90a1754bd7ad658afb3272323f')
-
-    @mock.patch(
-        'karbor.quota.DbQuotaDriver.get_project_quotas')
-    def test_quota_show(self, mock_quota_get):
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.controller.show(
-            req, '73f74f90a1754bd7ad658afb3272323f')
-        self.assertTrue(mock_quota_get.called)
-
-    def test_quota_show_invalid(self):
-        req = fakes.HTTPRequest.blank('/v1/quotas/1',
-                                      use_admin_context=True)
-        self.assertRaises(
-            exc.HTTPBadRequest, self.controller.show,
-            req, "1")
-
-    def test_quota_show_with_project_authorize_failed(self):
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=False)
-        self.assertRaises(
-            exc.HTTPForbidden, self.controller.show,
-            req, '73f74f90a1754bd7ad658afb3272323f')
-
-    @mock.patch(
-        'karbor.quota.DbQuotaDriver.destroy_all_by_project')
-    def test_quota_delete(self, mock_restore_get):
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=True)
-        self.controller.delete(
-            req, '73f74f90a1754bd7ad658afb3272323f')
-        self.assertTrue(mock_restore_get.called)
-
-    def test_quota_delete_with_invalid_project_id(self):
-        req = fakes.HTTPRequest.blank('/v1/quotas/1',
-                                      use_admin_context=True)
-        self.assertRaises(
-            exc.HTTPBadRequest, self.controller.delete,
-            req, "1")
-
-    def test_quota_delete_with_non_admin_context(self):
-        req = fakes.HTTPRequest.blank(
-            '/v1/quotas/73f74f90a1754bd7ad658afb3272323f',
-            use_admin_context=False)
-        self.assertRaises(
-            exception.PolicyNotAuthorized, self.controller.delete,
-            req, "73f74f90a1754bd7ad658afb3272323f")
-
-    def _quota_in_request_body(self):
-        quota_req = {
-            "plans": 20,
-        }
-        return quota_req
diff --git a/karbor/tests/unit/api/v1/test_restores.py b/karbor/tests/unit/api/v1/test_restores.py
deleted file mode 100644
index cfafcab9..00000000
--- a/karbor/tests/unit/api/v1/test_restores.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from unittest import mock
-
-from oslo_config import cfg
-from webob import exc
-
-from karbor.api.v1 import restores
-from karbor import context
-from karbor import exception
-from karbor.tests import base
-from karbor.tests.unit.api import fakes
-
-CONF = cfg.CONF
-
-DEFAULT_PROVIDER_ID = 'efc6a88b-9096-4bb6-8634-cda182a6e12a'
-DEFAULT_CHECKPOINT_ID = '09edcbdc-d1c2-49c1-a212-122627b20968'
-DEFAULT_RESTORE_TARGET = '192.168.1.2/identity/'
-DEFAULT_RESTORE_AUTH = {
-    'type': 'password',
-    'username': 'demo',
-    'password': 'test',
-}
-DEFAULT_PARAMETERS = {
-}
-
-
-class RestoreApiTest(base.TestCase):
-    def setUp(self):
-        super(RestoreApiTest, self).setUp()
-        self.controller = restores.RestoresController()
-        self.ctxt = context.RequestContext('demo', 'fakeproject', True)
-
-    @mock.patch(
-        'karbor.services.protection.api.API.restore')
-    @mock.patch(
-        'karbor.objects.restore.Restore.create')
-    def test_restore_create(self, mock_restore_create,
-                            mock_rpc_restore):
-        restore = self._restore_in_request_body()
-        body = {"restore": restore}
-        req = fakes.HTTPRequest.blank('/v1/restores')
-        self.controller.create(req, body=body)
-        self.assertTrue(mock_restore_create.called)
-        self.assertTrue(mock_rpc_restore.called)
-
-    def test_restore_create_InvalidBody(self):
-        restore = self._restore_in_request_body()
-        body = {"restorexx": restore}
-        req = fakes.HTTPRequest.blank('/v1/restores')
-        self.assertRaises(exception.ValidationError, self.controller.create,
-                          req, body=body)
-
-    def test_restore_create_InvalidProviderId(self):
-        restore = self._restore_in_request_body(provider_id="")
-        body = {"restore": restore}
-        req = fakes.HTTPRequest.blank('/v1/restores')
-        self.assertRaises(exception.ValidationError, self.controller.create,
-                          req, body=body)
-
-    def test_restore_create_Invalidcheckpoint_id(self):
-        restore = self._restore_in_request_body(checkpoint_id="")
-        body = {"restore": restore}
-        req = fakes.HTTPRequest.blank('/v1/restores')
-        self.assertRaises(exception.ValidationError, self.controller.create,
-                          req, body=body)
-
-    @mock.patch('karbor.services.protection.api.API.restore')
-    def test_restore_create_with_checkpoint_not_allowed_exception(
-            self, mock_restore):
-        mock_restore.side_effect = exception.AccessCheckpointNotAllowed
-        restore = self._restore_in_request_body()
-        body = {"restore": restore}
-        req = fakes.HTTPRequest.blank('/v1/restores')
-        self.assertRaises(exc.HTTPForbidden, self.controller.create,
-                          req, body=body)
-
-    @mock.patch(
-        'karbor.api.v1.restores.RestoresController._get_all')
-    def test_restore_list_detail(self, mock_get_all):
-        req = fakes.HTTPRequest.blank('/v1/restores')
-        self.controller.index(req)
-        self.assertTrue(mock_get_all.called)
-
-    @mock.patch(
-        'karbor.api.v1.restores.RestoresController._get_all')
-    def test_restore_index_limit_offset(self, mock_get_all):
-        req = fakes.HTTPRequest.blank(
-            '/v1/restores?limit=2&offset=1')
-        self.controller.index(req)
-        self.assertTrue(mock_get_all.called)
-
-        req = fakes.HTTPRequest.blank('/v1/restores?limit=-1&offset=1')
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller.index,
-                          req)
-
-        req = fakes.HTTPRequest.blank('/v1/restores?limit=a&offset=1')
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller.index,
-                          req)
-
-        url = '/v1/restores?limit=2&offset=43543564546567575'
-        req = fakes.HTTPRequest.blank(url)
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller.index,
-                          req)
-
-    @mock.patch(
-        'karbor.api.v1.restores.RestoresController.'
-        '_restore_get')
-    def test_restore_show(self, mock_restore_get):
-        req = fakes.HTTPRequest.blank('/v1/restores')
-        self.controller.show(
-            req, '2a9ce1f3-cc1a-4516-9435-0ebb13caa398')
-        self.assertTrue(mock_restore_get.called)
-
-    def test_restore_show_Invalid(self):
-        req = fakes.HTTPRequest.blank('/v1/restores/1')
-        self.assertRaises(
-            exc.HTTPBadRequest, self.controller.show,
-            req, "1")
-
-    def _restore_in_request_body(
-            self, provider_id=DEFAULT_PROVIDER_ID,
-            checkpoint_id=DEFAULT_CHECKPOINT_ID,
-            restore_target=DEFAULT_RESTORE_TARGET,
-            restore_auth=DEFAULT_RESTORE_AUTH,
-            parameters=DEFAULT_PARAMETERS):
-        restore_req = {
-            'provider_id': provider_id,
-            'checkpoint_id': checkpoint_id,
-            'restore_target': restore_target,
-            'restore_auth': restore_auth,
-            'parameters': parameters,
-        }
-
-        return restore_req
diff --git a/karbor/tests/unit/api/v1/test_router.py b/karbor/tests/unit/api/v1/test_router.py
deleted file mode 100644
index b6f17e65..00000000
--- a/karbor/tests/unit/api/v1/test_router.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from six.moves import http_client
-
-from karbor.api.openstack import ProjectMapper
-from karbor.api.v1 import router
-from karbor.tests import base
-from karbor.tests.unit.api import fakes
-
-
-class PlansRouterTestCase(base.TestCase):
-    def setUp(self):
-        super(PlansRouterTestCase, self).setUp()
-        mapper = ProjectMapper()
-        self.app = router.APIRouter(mapper)
-
-    def test_plans(self):
-        req = fakes.HTTPRequest.blank('/fakeproject/plans')
-        req.method = 'GET'
-        req.content_type = 'application/json'
-        response = req.get_response(self.app)
-        self.assertEqual(http_client.OK, response.status_int)
diff --git a/karbor/tests/unit/api/v1/test_scheduled_operation.py b/karbor/tests/unit/api/v1/test_scheduled_operation.py
deleted file mode 100644
index a3c58fc4..00000000
--- a/karbor/tests/unit/api/v1/test_scheduled_operation.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_utils import uuidutils
-from webob import exc
-
-from karbor.api.v1 import plans as plan_api
-from karbor.api.v1 import scheduled_operations as operation_api
-from karbor.api.v1 import triggers as trigger_api
-from karbor import context
-from karbor import exception
-from karbor.tests import base
-from karbor.tests.unit.api import fakes
-from karbor.tests.unit.api.v1 import test_triggers
-
-
-class FakeRemoteOperationApi(object):
-    def __init__(self):
-        super(FakeRemoteOperationApi, self).__init__()
-        self._create_operation_exception = None
-        self._delete_operation_exception = None
-
-    def create_scheduled_operation(self, context, operation):
-        if self._create_operation_exception:
-            raise self._create_operation_exception
-
-    def delete_scheduled_operation(self, context, operation_id, trigger_id):
-        if self._delete_operation_exception:
-            raise self._delete_operation_exception
-
-
-class ScheduledOperationApiTest(base.TestCase):
-
-    def setUp(self):
-        super(ScheduledOperationApiTest, self).setUp()
-
-        self.remote_operation_api = FakeRemoteOperationApi()
-        self.controller = operation_api.ScheduledOperationController()
-        self.controller.operationengine_api = self.remote_operation_api
-
-        self.ctxt = context.RequestContext('demo', 'fakeproject', True)
-        self.req = fakes.HTTPRequest.blank('/v1/scheduled_operations')
-
-        trigger = self._create_trigger()
-        self._plan = self._create_plan(uuidutils.generate_uuid())
-        self.default_create_operation_param = {
-            "name": "123",
-            "description": "123",
-            "operation_type": "protect",
-            "trigger_id": trigger['trigger_info']['id'],
-            "operation_definition": {
-                "plan_id": self._plan['id'],
-                "provider_id": self._plan['provider_id']
-            },
-        }
-
-    def test_create_operation_InvalidBody(self):
-        self.assertRaises(exception.ValidationError,
-                          self.controller.create,
-                          self.req, body={})
-
-    def test_create_operation_InvalidName(self):
-        body = self._get_create_operation_request_body()
-        self.assertRaises(exception.ValidationError,
-                          self.controller.create,
-                          self.req, body=body)
-
-    def test_create_operation_invalid_trigger(self):
-        param = self.default_create_operation_param.copy()
-        param['trigger_id'] = 123
-        body = self._get_create_operation_request_body(param)
-        self.assertRaises(exception.ValidationError,
-                          self.controller.create,
-                          self.req, body=body)
-
-    def test_create_operation_receive_invalid_except(self):
-        self.remote_operation_api._create_operation_exception =\
-            exception.TriggerIsInvalid(trigger_id=None)
-
-        param = self.default_create_operation_param.copy()
-        body = self._get_create_operation_request_body(param)
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller.create,
-                          self.req, body=body)
-
-        self.remote_operation_api._create_operation_exception = None
-
-    def test_create_operation_receive_unknown_except(self):
-        self.remote_operation_api._create_operation_exception =\
-            exception.TriggerNotFound(id=None)
-
-        param = self.default_create_operation_param.copy()
-        body = self._get_create_operation_request_body(param)
-        self.assertRaises(exc.HTTPInternalServerError,
-                          self.controller.create,
-                          self.req, body=body)
-
-        self.remote_operation_api._create_operation_exception = None
-
-    def test_create_operation(self):
-        name = 'my protect'
-        param = self.default_create_operation_param.copy()
-        param['name'] = name
-        body = self._get_create_operation_request_body(param)
-        operation = self.controller.create(self.req, body=body)
-        self.assertEqual(name, operation['scheduled_operation']['name'])
-
-    def test_create_retention_scheduled_operation(self):
-        name = 'my retention protect'
-        param = self.default_create_operation_param.copy()
-        param['name'] = name
-        param['operation_definition']['retention_duration'] = 100
-        param['operation_definition']['max_backups'] = 3
-        body = self._get_create_operation_request_body(param)
-        operation = self.controller.create(self.req, body=body)
-        self.assertEqual(name, operation['scheduled_operation']['name'])
-
-    def test_delete_operation_receive_NotFound_except(self):
-        self.remote_operation_api._delete_operation_exception =\
-            exception.ScheduledOperationStateNotFound(op_id=None)
-
-        operation = self._create_one_operation()
-        self.assertRaises(exc.HTTPInternalServerError,
-                          self.controller.delete,
-                          self.req,
-                          operation['scheduled_operation']['id'])
-
-        self.remote_operation_api._delete_operation_exception = None
-
-    def test_delete_operation(self):
-        operation = self._create_one_operation()
-        self.controller.delete(self.req,
-                               operation['scheduled_operation']['id'])
-        self.assertRaises(exc.HTTPNotFound,
-                          self.controller.show,
-                          self.req,
-                          operation['scheduled_operation']['id'])
-
-    def test_show_operation_not_exist(self):
-        self.assertRaises(exc.HTTPNotFound,
-                          self.controller.show,
-                          self.req,
-                          '2a9ce1f3-cc1a-4516-9435-0ebb13caa398')
-
-    def test_show_operation_invalid_id(self):
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller.show,
-                          self.req, 1)
-
-    def test_show_operation(self):
-        operation = self._create_one_operation()
-        operation1 = self.controller.show(
-            self.req, operation['scheduled_operation']['id'])
-        self.assertEqual(operation['scheduled_operation']['id'],
-                         operation1['scheduled_operation']['id'])
-
-    def test_list_operation(self):
-        operation = self._create_one_operation()
-        operations = self.controller.index(self.req)
-        for item in operations['operations']:
-            if item['id'] == operation['scheduled_operation']['id']:
-                self.assertTrue(1)
-
-        self.assertFalse(0)
-
-    def _create_one_operation(self):
-        param = self.default_create_operation_param.copy()
-        body = self._get_create_operation_request_body(param)
-        return self.controller.create(self.req, body=body)
-
-    def _get_create_operation_request_body(self, param={}):
-        return {"scheduled_operation": param}
-
-    def _create_trigger(self):
-        create_trigger_param = {
-            "trigger_info": {
-                "name": "123",
-                "type": "time",
-                "properties": {
-                    "format": "crontab",
-                    "pattern": "* * * * *"
-                },
-            }
-        }
-        controller = trigger_api.TriggersController()
-        controller.operationengine_api = test_triggers.FakeRemoteOperationApi()
-        req = fakes.HTTPRequest.blank('/v1/triggers')
-        return controller.create(req, body=create_trigger_param)
-
-    @mock.patch(
-        'karbor.context.RequestContext.can')
-    @mock.patch(
-        'karbor.services.protection.rpcapi.ProtectionAPI.show_provider')
-    def _create_plan(self, provider_id, mock_provider, mock_policy):
-        create_plan_param = {
-            'plan': {
-                'name': '123',
-                'provider_id': provider_id,
-                'resources': [
-                    {'id': '39bb894794b741e982bd26144d2949f6',
-                     'type': 'OS::Cinder::Volume', 'name': '123'}
-                ],
-                'parameters': {"OS::Cinder::Volume": {"backup_name": "test"}},
-            }
-        }
-        controller = plan_api.PlansController()
-        mock_provider.return_value = fakes.PROVIDER_OS
-        req = fakes.HTTPRequest.blank('/v1/plans')
-        plan = controller.create(req, body=create_plan_param)
-        return plan['plan']
diff --git a/karbor/tests/unit/api/v1/test_services.py b/karbor/tests/unit/api/v1/test_services.py
deleted file mode 100644
index 0dffc802..00000000
--- a/karbor/tests/unit/api/v1/test_services.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from unittest import mock
-
-from webob import exc
-
-from karbor.api.v1 import services
-from karbor import exception
-from karbor.tests import base
-from karbor.tests.unit.api import fakes
-
-
-class ServiceApiTest(base.TestCase):
-    def setUp(self):
-        super(ServiceApiTest, self).setUp()
-        self.controller = services.ServiceController()
-
-    @mock.patch('karbor.objects.service.ServiceList.get_all_by_args')
-    def test_service_list_with_admin_context(self, mock_get_all_by_args):
-        req = fakes.HTTPRequest.blank('/v1/os-services?host=host1',
-                                      use_admin_context=True)
-        self.controller.index(req)
-        self.assertTrue(mock_get_all_by_args.called)
-
-    def test_service_list_with_non_admin_context(self):
-        req = fakes.HTTPRequest.blank('/v1/os-services',
-                                      use_admin_context=False)
-        self.assertRaises(
-            exception.PolicyNotAuthorized, self.controller.index, req)
-
-    @mock.patch('karbor.objects.service.ServiceList.get_all_by_args')
-    def test_service_list_with_invalid_services(self, mock_get_all_by_args):
-        req = fakes.HTTPRequest.blank('/v1/os-services',
-                                      use_admin_context=True)
-        mock_get_all_by_args.side_effect = exception.NotFound()
-        self.assertRaises(exc.HTTPBadRequest, self.controller.index, req)
-
-    @mock.patch('karbor.utils.service_is_up')
-    @mock.patch('karbor.objects.service.Service.get_by_id')
-    def test_service_update_with_admin_context(
-            self, mock_get_by_id, mock_service_is_up):
-        req = fakes.HTTPRequest.blank('/v1/os-services/1',
-                                      use_admin_context=True)
-        body = {
-            "status": 'disabled',
-            'disabled_reason': 'reason'
-        }
-        mock_service = mock.MagicMock(
-            binary='karbor-operationengine', save=mock.MagicMock())
-        mock_get_by_id.return_value = mock_service
-        mock_service_is_up.return_value = True
-        self.controller.update(req, "fake_id", body)
-        self.assertTrue(mock_get_by_id.called)
-        self.assertTrue(mock_service.save.called)
-
-    def test_service_update_with_non_admin_context(self):
-        req = fakes.HTTPRequest.blank('/v1/os-services/1',
-                                      use_admin_context=False)
-        body = {
-            "status": 'disabled',
-            'disabled_reason': 'reason'
-        }
-        self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.update,
-            req,
-            "fake_id",
-            body
-        )
-
-    @mock.patch('karbor.objects.service.Service.get_by_id')
-    def test_update_protection_services(self, mock_get_by_id):
-        req = fakes.HTTPRequest.blank('/v1/os-services/1',
-                                      use_admin_context=True)
-        body = {
-            "status": 'disabled',
-            'disabled_reason': 'reason'
-        }
-        mock_service = mock.MagicMock(binary='karbor-protection')
-        mock_get_by_id.return_value = mock_service
-        self.assertRaises(
-            exc.HTTPBadRequest,
-            self.controller.update,
-            req,
-            "fake_id",
-            body
-        )
-
-    @mock.patch('karbor.objects.service.Service.get_by_id')
-    def test_service_update_with_service_not_found(self,
-                                                   mock_get_by_id):
-        body = {
-            "status": 'disabled',
-            'disabled_reason': 'reason'
-        }
-        req = fakes.HTTPRequest.blank('/v1/os-services/1',
-                                      use_admin_context=True)
-        mock_get_by_id.side_effect = exception.ServiceNotFound
-        self.assertRaises(
-            exc.HTTPNotFound,
-            self.controller.update,
-            req,
-            "fake_id",
-            body
-        )
-
-    @mock.patch('karbor.objects.service.Service.get_by_id')
-    def test_service_update_with_invalid_disabled_reason(self, mock_get_by_id):
-        req = fakes.HTTPRequest.blank('/v1/os-services/1',
-                                      use_admin_context=True)
-        body = {
-            "status": 'enabled',
-            'disabled_reason': 'reason'
-        }
-        mock_service = mock.MagicMock(
-            binary='karbor-operationengine', save=mock.MagicMock())
-        mock_get_by_id.return_value = mock_service
-        self.assertRaises(
-            exc.HTTPBadRequest,
-            self.controller.update,
-            req,
-            "fake_id",
-            body
-        )
-
-    @mock.patch('karbor.utils.service_is_up')
-    @mock.patch('karbor.objects.service.Service.get_by_id')
-    def test_service_update_with_enabled_status(
-            self, mock_get_by_id, mock_service_is_up):
-        req = fakes.HTTPRequest.blank('/v1/os-services/1',
-                                      use_admin_context=True)
-        body = {
-            "status": 'enabled'
-        }
-        mock_service = mock.MagicMock(
-            binary='karbor-operationengine', save=mock.MagicMock())
-        mock_get_by_id.return_value = mock_service
-        mock_service_is_up.return_value = True
-        self.controller.update(req, "fake_id", body)
-        self.assertTrue(mock_get_by_id.called)
-        self.assertTrue(mock_service.save.called)
diff --git a/karbor/tests/unit/api/v1/test_triggers.py b/karbor/tests/unit/api/v1/test_triggers.py
deleted file mode 100755
index e99e390a..00000000
--- a/karbor/tests/unit/api/v1/test_triggers.py
+++ /dev/null
@@ -1,259 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from datetime import datetime
-from unittest import mock
-
-from webob import exc
-
-from karbor.api.v1 import triggers as trigger_api
-from karbor import context
-from karbor import exception
-from karbor.i18n import _
-from karbor import objects
-from karbor.tests import base
-from karbor.tests.unit.api import fakes
-
-
-class FakeRemoteOperationApi(object):
-    def create_trigger(self, context, trigger):
-        if trigger.type not in ['time']:
-            msg = (_("Invalid trigger type:%s") % trigger.type)
-            raise exception.InvalidInput(msg)
-
-        if trigger.properties['format'] not in ['crontab']:
-            msg = (_("Invalid trigger time format type"))
-            raise exception.InvalidInput(msg)
-
-    def verify_trigger(self, context, trigger):
-        pass
-
-    def delete_trigger(self, context, trigger_id):
-        pass
-
-    def update_trigger(self, context, trigger):
-        pass
-
-
-class TriggerApiTest(base.TestCase):
-    def setUp(self):
-        super(TriggerApiTest, self).setUp()
-        self.controller = trigger_api.TriggersController()
-        self.controller.operationengine_api = FakeRemoteOperationApi()
-        self.ctxt = context.RequestContext('demo', 'fakeproject',
-                                           True)
-        self.req = fakes.HTTPRequest.blank('/v1/triggers')
-        self.default_create_trigger_param = {
-            "name": "123",
-            "type": "time",
-            "properties": {
-                "format": "crontab",
-                "pattern": "* * * * *",
-                'start_time': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
-            },
-        }
-        self.default_update_trigger_param = {
-            "name": "123",
-            "properties": {
-                "format": "crontab",
-                "pattern": "* * * * *",
-                'start_time': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
-            },
-        }
-
-    def test_create_trigger_InvalidBody(self):
-        self.assertRaises(exception.ValidationError,
-                          self.controller.create,
-                          self.req, body={})
-
-    def test_create_trigger_InvalidName(self):
-        body = self._get_create_trigger_request_body()
-        self.assertRaises(exception.ValidationError,
-                          self.controller.create,
-                          self.req, body=body)
-
-    def test_create_trigger_invalid_trigger_type(self):
-        param = self.default_create_trigger_param.copy()
-        param['type'] = "123"
-        body = self._get_create_trigger_request_body(param)
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller.create,
-                          self.req, body=body)
-
-    def test_create_trigger_invalid_trigger_formt_type(self):
-        param = self.default_create_trigger_param.copy()
-        param['properties']['format'] = "123"
-        body = self._get_create_trigger_request_body(param)
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller.create,
-                          self.req, body=body)
-
-    def test_create_trigger(self):
-        name = 'every minutes'
-        param = self.default_create_trigger_param.copy()
-        param['name'] = name
-        body = self._get_create_trigger_request_body(param)
-        trigger = self.controller.create(self.req, body=body)
-        self.assertEqual(name, trigger['trigger_info']['name'])
-
-    def test_delete_trigger_binded_with_operation(self):
-        trigger = self._create_one_trigger()
-        trigger_id = trigger['trigger_info']['id']
-        self._create_scheduled_operation(trigger_id)
-
-        self.assertRaises(exc.HTTPFailedDependency,
-                          self.controller.delete,
-                          self.req,
-                          trigger_id)
-
-    def test_delete_trigger(self):
-        trigger = self._create_one_trigger()
-        self.controller.delete(self.req, trigger['trigger_info']['id'])
-        self.assertRaises(exc.HTTPNotFound,
-                          self.controller.show,
-                          self.req,
-                          trigger['trigger_info']['id'])
-
-    def test_update_trigger_InvalidID(self):
-        param = self.default_update_trigger_param.copy()
-        body = self._get_create_trigger_request_body(param)
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller.update,
-                          self.req, 'fake-id',
-                          body=body)
-
-    def test_update_trigger_nonexist_trigger(self):
-        param = self.default_update_trigger_param.copy()
-        body = self._get_create_trigger_request_body(param)
-        self.assertRaises(exc.HTTPNotFound,
-                          self.controller.update,
-                          self.req, '42c8e647-cc13-4fc1-8d5b-b1e962290722',
-                          body=body)
-
-    def test_update_trigger_InvalidName(self):
-        trigger = self._create_one_trigger()
-
-        param = self.default_update_trigger_param.copy()
-        param['name'] = 'a' * 256
-        body = self._get_create_trigger_request_body(param)
-        self.assertRaises(ValueError,
-                          self.controller.update,
-                          self.req, trigger['trigger_info']['id'],
-                          body=body)
-
-    def test_update_trigger_miss_start_time(self):
-        trigger = self._create_one_trigger()
-
-        param = self.default_update_trigger_param.copy()
-        param['properties'].pop('start_time')
-        body = self._get_create_trigger_request_body(param)
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller.update,
-                          self.req, trigger['trigger_info']['id'],
-                          body=body)
-
-    def test_update_trigger(self):
-        trigger = self._create_one_trigger()
-
-        name = 'every minutes'
-        param = self.default_create_trigger_param.copy()
-        param['name'] = name
-        param['properties']['window'] = 10
-        body = self._get_create_trigger_request_body(param)
-        trigger1 = self.controller.update(
-            self.req, trigger['trigger_info']['id'], body=body)
-
-        self.assertEqual(name, trigger1['trigger_info']['name'])
-        self.assertEqual(10, int(
-            trigger1['trigger_info']['properties']['window']))
-
-    @mock.patch('karbor.services.operationengine.engine.triggers.timetrigger.'
-                'time_trigger.utils.check_trigger_definition')
-    def test_update_trigger_invalid_windows(self,
-                                            mock_check_trigger_definition):
-        trigger = self._create_one_trigger()
-
-        name = 'every minutes'
-        param = self.default_create_trigger_param.copy()
-        param['name'] = name
-        param['properties']['window'] = 'abc'
-        body = self._get_create_trigger_request_body(param)
-        mock_check_trigger_definition.return_value = exception.InvalidInput
-        self.assertRaises(exception.ValidationError,
-                          self.controller.update,
-                          self.req, trigger['trigger_info']['id'], body=body)
-
-    def test_update_trigger_without_start_time(self):
-        trigger = self._create_one_trigger()
-
-        name = 'every minutes'
-        param = self.default_create_trigger_param.copy()
-        param['name'] = name
-        param['properties'].pop('start_time')
-        body = self._get_create_trigger_request_body(param)
-        self.assertRaises(
-            exc.HTTPBadRequest,
-            self.controller.update,
-            self.req,
-            trigger['trigger_info']['id'],
-            body=body
-        )
-
-    def test_show_trigger_not_exist(self):
-        self.assertRaises(exc.HTTPNotFound,
-                          self.controller.show,
-                          self.req,
-                          '2a9ce1f3-cc1a-4516-9435-0ebb13caa398')
-
-    def test_show_trigger_invalid_id(self):
-        self.assertRaises(exc.HTTPBadRequest,
-                          self.controller.show,
-                          self.req, 1)
-
-    def test_show_trigger(self):
-        trigger = self._create_one_trigger()
-        trigger1 = self.controller.show(self.req,
-                                        trigger['trigger_info']['id'])
-        self.assertEqual(trigger['trigger_info']['id'],
-                         trigger1['trigger_info']['id'])
-
-    def test_list_trigger(self):
-        trigger = self._create_one_trigger()
-        triggers = self.controller.index(self.req)
-        for item in triggers['triggers']:
-            if item['id'] == trigger['trigger_info']['id']:
-                self.assertTrue(1)
-
-        self.assertFalse(0)
-
-    def _create_one_trigger(self):
-        param = self.default_create_trigger_param.copy()
-        body = self._get_create_trigger_request_body(param)
-        return self.controller.create(self.req, body=body)
-
-    def _get_create_trigger_request_body(self, param={}):
-        return {"trigger_info": param}
-
-    def _create_scheduled_operation(self, trigger_id):
-        operation_info = {
-            "name": "123",
-            "description": "123",
-            "operation_type": "protect",
-            'user_id': '123',
-            "project_id": "123",
-            "trigger_id": trigger_id,
-            "operation_definition": {
-                "plan_id": ""
-            },
-        }
-        operation = objects.ScheduledOperation(self.ctxt, **operation_info)
-        operation.create()
-        return operation
diff --git a/karbor/tests/unit/api/v1/test_verifications.py b/karbor/tests/unit/api/v1/test_verifications.py
deleted file mode 100755
index 62ebb67a..00000000
--- a/karbor/tests/unit/api/v1/test_verifications.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from unittest import mock
-
-from oslo_config import cfg
-from webob import exc
-
-from karbor.api.v1 import verifications
-from karbor import context
-from karbor import exception
-from karbor.tests import base
-from karbor.tests.unit.api import fakes
-
-CONF = cfg.CONF
-
-DEFAULT_PROJECT_ID = '39bb894794b741e982bd26144d2949f6'
-DEFAULT_PROVIDER_ID = 'efc6a88b-9096-4bb6-8634-cda182a6e12a'
-DEFAULT_CHECKPOINT_ID = '09edcbdc-d1c2-49c1-a212-122627b20968'
-DEFAULT_PARAMETERS = {
-}
-
-
-class VerificationApiTest(base.TestCase):
-    def setUp(self):
-        super(VerificationApiTest, self).setUp()
-        self.controller = verifications.VerificationsController()
-        self.ctxt = context.RequestContext('demo', 'fakeproject', True)
-
-    @mock.patch(
-        'karbor.services.protection.api.API.verification')
-    @mock.patch(
-        'karbor.objects.verification.Verification.create')
-    def test_verification_create(self, mock_verification_create,
-                                 mock_rpc_verification):
-        verification = self._verification_in_request_body()
-        body = {"verification": verification}
-        req = fakes.HTTPRequest.blank('/v1/verifications')
-        self.controller.create(req, body=body)
-        self.assertTrue(mock_verification_create.called)
-        self.assertTrue(mock_rpc_verification.called)
-
-    def test_verification_create_InvalidBody(self):
-        verification = self._verification_in_request_body()
-        body = {"verificationxx": verification}
-        req = fakes.HTTPRequest.blank('/v1/verifications')
-        self.assertRaises(exception.ValidationError,
-                          self.controller.create,
-                          req, body=body)
-
-    def test_verification_create_InvalidProviderId(self):
-        verification = self._verification_in_request_body(
-            provider_id="")
-        body = {"verification": verification}
-        req = fakes.HTTPRequest.blank('/v1/verifications')
-        self.assertRaises(exception.ValidationError, self.controller.create,
-                          req, body=body)
-
-    def test_verification_create_Invalidcheckpoint_id(self):
-        verification = self._verification_in_request_body(
-            checkpoint_id="")
-        body = {"verification": verification}
-        req = fakes.HTTPRequest.blank('/v1/verifications')
-        self.assertRaises(exception.ValidationError, self.controller.create,
-                          req, body=body)
-
-    @mock.patch(
-        'karbor.api.v1.verifications.'
-        'VerificationsController._get_all')
-    def test_verification_list_detail(self, mock_get_all):
-        req = fakes.HTTPRequest.blank('/v1/verifications')
-        self.controller.index(req)
-        self.assertTrue(mock_get_all.called)
-
-    @mock.patch(
-        'karbor.api.v1.verifications.'
-        'VerificationsController._get_all')
-    @mock.patch('karbor.api.common.ViewBuilder._get_collection_links')
-    def test_verification_list_detail_with_verifications_links(self,
-                                                               mock_get_links,
-                                                               mock_get_all):
-        except_value = [{
-            "rel": "next",
-            "href": "/v1/verifications?marker"
-        }]
-        req = fakes.HTTPRequest.blank('/v1/verifications')
-        mock_get_links.return_value = except_value
-        return_value = self.controller.index(req)
-        self.assertTrue(mock_get_all.called)
-        self.assertEqual(return_value['verifications_links'], except_value)
-
-    @mock.patch(
-        'karbor.api.v1.verifications.'
-        'VerificationsController._verification_get')
-    def test_verification_show(self, mock_verification_get):
-        req = fakes.HTTPRequest.blank('/v1/verifications')
-        self.controller.show(
-            req, '2a9ce1f3-cc1a-4516-9435-0ebb13caa398')
-        self.assertTrue(mock_verification_get.called)
-
-    def test_verification_show_Invalid(self):
-        req = fakes.HTTPRequest.blank('/v1/verifications/1')
-        self.assertRaises(
-            exc.HTTPBadRequest, self.controller.show,
-            req, "1")
-
-    def _verification_in_request_body(
-            self, provider_id=DEFAULT_PROVIDER_ID,
-            checkpoint_id=DEFAULT_CHECKPOINT_ID,
-            parameters=DEFAULT_PARAMETERS):
-        verification_req = {
-            'provider_id': provider_id,
-            'checkpoint_id': checkpoint_id,
-            'parameters': parameters,
-        }
-
-        return verification_req
diff --git a/karbor/tests/unit/clients/__init__.py b/karbor/tests/unit/clients/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/karbor/tests/unit/clients/test_cinder_client.py b/karbor/tests/unit/clients/test_cinder_client.py
deleted file mode 100644
index 5fd145c2..00000000
--- a/karbor/tests/unit/clients/test_cinder_client.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_config import cfg
-
-from karbor.context import RequestContext
-from karbor.services.protection.clients import cinder
-from karbor.tests import base
-
-
-class CinderClientTest(base.TestCase):
-    def setUp(self):
-        super(CinderClientTest, self).setUp()
-
-        self._public_url = 'http://127.0.0.1:8776/v3/abcd'
-
-        service_catalog = [
-            {'type': 'volumev3',
-             'name': 'cinderv3',
-             'endpoints': [{'publicURL': self._public_url}],
-             },
-        ]
-        self._context = RequestContext(user_id='demo',
-                                       project_id='abcd',
-                                       auth_token='efgh',
-                                       service_catalog=service_catalog)
-
-    def test_create_client_by_endpoint(self):
-        cfg.CONF.set_default('cinder_endpoint',
-                             'http://127.0.0.1:8776/v3',
-                             'cinder_client')
-        client = cinder.create(self._context, cfg.CONF)
-        self.assertEqual('volumev3', client.client.service_type)
-        self.assertEqual('http://127.0.0.1:8776/v3/abcd',
-                         client.client.management_url)
-
-    def test_create_client_by_catalog(self):
-        client = cinder.create(self._context, cfg.CONF)
-        self.assertEqual('volumev3', client.client.service_type)
-        self.assertEqual('http://127.0.0.1:8776/v3/abcd',
-                         client.client.management_url)
-
-    @mock.patch('karbor.services.protection.clients.utils.get_url')
-    @mock.patch('cinderclient.client.Client')
-    def test_create_client(self, create, get_url):
-        get_url.return_value = self._public_url
-
-        client_config = cfg.CONF[cinder.CONFIG_GROUP]
-        client_version = cinder.CINDERCLIENT_VERSION
-        session = object()
-        args = {
-            'project_id': self._context.project_id,
-            'cacert': client_config.cinder_ca_cert_file,
-            'insecure': client_config.cinder_auth_insecure,
-        }
-
-        cinder.create(self._context, cfg.CONF)
-        create.assert_called_with(client_version, **args)
-
-        cinder.create(self._context, cfg.CONF, session=session)
-        create.assert_called_with(client_version,
-                                  endpoint_override=self._public_url,
-                                  session=session)
diff --git a/karbor/tests/unit/clients/test_eisoo.py b/karbor/tests/unit/clients/test_eisoo.py
deleted file mode 100644
index c3964119..00000000
--- a/karbor/tests/unit/clients/test_eisoo.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright (c) 2016 Shanghai EISOO Information Technology Corp.
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from karbor.context import RequestContext
-from karbor.services.protection.clients import eisoo
-
-from karbor.tests import base
-
-
-class FakeConfig(object):
-    def __init__(self):
-        super(FakeConfig, self).__init__()
-        self.eisoo_client = EisooClient()
-
-    def __call__(self, args):
-        pass
-
-    def register_opts(self, opts, **kwargs):
-        pass
-
-
-class EisooClient(object):
-    def __init__(self):
-        super(EisooClient, self).__init__()
-        self.eisoo_endpoint = 'eisoo_endpoint'
-        self.eisoo_app_id = 'eisoo_app_id'
-        self.eisoo_app_secret = 'eisoo_app_secret'
-
-
-class ABClientTest(base.TestCase):
-    def setUp(self):
-        super(ABClientTest, self).setUp()
-        self._context = RequestContext(user_id='demo',
-                                       project_id='asdf',
-                                       auth_token='qwe',
-                                       service_catalog=None)
-
-    @mock.patch('oslo_config.cfg.ConfigOpts', FakeConfig)
-    @mock.patch('karbor.utils.find_config')
-    @mock.patch('os.path.abspath')
-    def test_create_client_by_config_file(self, mock_abspath, mock_findconfig):
-        mock_findconfig.return_value = '/etc/provider.d'
-        mock_abspath.return_value = ''
-
-        client = eisoo.create(self._context, None)
-        self.assertEqual('eisoo_app_id', client._app_id)
-
-    def tearDown(self):
-        super(ABClientTest, self).tearDown()
diff --git a/karbor/tests/unit/clients/test_freezer_client.py b/karbor/tests/unit/clients/test_freezer_client.py
deleted file mode 100644
index be2bacf6..00000000
--- a/karbor/tests/unit/clients/test_freezer_client.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from keystoneauth1 import session as keystone_session
-from oslo_config import cfg
-
-from karbor.context import RequestContext
-from karbor.services.protection.clients import freezer
-from karbor.tests import base
-
-
-class FreezerClientTest(base.TestCase):
-    def setUp(self):
-        super(FreezerClientTest, self).setUp()
-        self._public_url = 'http://127.0.0.1:9090'
-        self._auth_url = 'http://127.0.0.1/v2.0'
-        service_catalog = [
-            {'type': 'backup',
-             'name': 'freezer',
-             'endpoints': [{'publicURL': self._public_url}],
-             },
-        ]
-        self._context = RequestContext(user_id='demo',
-                                       project_id='abcd',
-                                       project_name='efgh',
-                                       auth_token='ijkl',
-                                       service_catalog=service_catalog)
-
-    @mock.patch('karbor.services.protection.clients.utils.get_url')
-    @mock.patch('freezerclient.v1.client.Client')
-    def test_create_client(self, create, get_url):
-        get_url.return_value = self._public_url
-
-        session = keystone_session.Session(auth=None)
-        freezer.create(self._context, cfg.CONF, session=session)
-        create.assert_called_with(endpoint=self._public_url,
-                                  session=session)
-
-    @mock.patch('karbor.services.protection.clients.utils.get_url')
-    @mock.patch('freezerclient.v1.client.Client')
-    def test_create_client_without_session(self, create, get_url):
-        get_url.return_value = self._public_url
-
-        client_config = cfg.CONF[freezer.CONFIG_GROUP]
-        # due to freezer client bug, auth_uri should be specified
-        cfg.CONF.set_default('auth_uri',
-                             self._auth_url,
-                             freezer.CONFIG_GROUP)
-        args = {
-            'project_id': self._context.project_id,
-            'project_name': self._context.project_name,
-            'cacert': client_config.freezer_ca_cert_file,
-            'insecure': client_config.freezer_auth_insecure,
-            'endpoint': self._public_url,
-            'token': self._context.auth_token,
-            'auth_url': self._auth_url,
-        }
-
-        freezer.create(self._context, cfg.CONF)
-        create.assert_called_with(**args)
diff --git a/karbor/tests/unit/clients/test_glance_client.py b/karbor/tests/unit/clients/test_glance_client.py
deleted file mode 100644
index 1a008196..00000000
--- a/karbor/tests/unit/clients/test_glance_client.py
+++ /dev/null
@@ -1,73 +0,0 @@
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_config import cfg
-
-from karbor.context import RequestContext
-from karbor.services.protection.clients import glance
-from karbor.tests import base
-
-
-class GlanceClientTest(base.TestCase):
-    def setUp(self):
-        super(GlanceClientTest, self).setUp()
-
-        self._public_url = 'http://127.0.0.1:9292'
-
-        service_catalog = [
-            {
-                'endpoints': [{'publicURL': self._public_url}],
-                'type': 'image',
-                'name': 'glance',
-            },
-        ]
-        self._context = RequestContext(user_id='demo',
-                                       project_id='abcd',
-                                       auth_token='efgh',
-                                       service_catalog=service_catalog)
-
-    def test_create_client_by_endpoint(self):
-        cfg.CONF.set_default('glance_endpoint',
-                             'http://127.0.0.1:9292',
-                             'glance_client')
-        gc = glance.create(self._context, cfg.CONF)
-        self.assertEqual('http://127.0.0.1:9292', gc.http_client.endpoint)
-
-    def test_create_client_by_catalog(self):
-        gc = glance.create(self._context, cfg.CONF)
-        self.assertEqual('http://127.0.0.1:9292', gc.http_client.endpoint)
-
-    @mock.patch('karbor.services.protection.clients.utils.get_url')
-    @mock.patch('glanceclient.client.Client')
-    def test_create_client(self, create, get_url):
-        get_url.return_value = self._public_url
-
-        client_config = cfg.CONF[glance.CONFIG_GROUP]
-        client_version = glance.GLANCECLIENT_VERSION
-        session = object()
-        args = {
-            'endpoint': self._public_url,
-            'token': self._context.auth_token,
-            'cacert': client_config.glance_ca_cert_file,
-            'insecure': client_config.glance_auth_insecure,
-        }
-
-        glance.create(self._context, cfg.CONF)
-        create.assert_called_with(client_version, **args)
-
-        glance.create(self._context, cfg.CONF, session=session)
-        create.assert_called_with(client_version,
-                                  endpoint=self._public_url,
-                                  session=session)
diff --git a/karbor/tests/unit/clients/test_k8s_client.py b/karbor/tests/unit/clients/test_k8s_client.py
deleted file mode 100644
index a5d3fd46..00000000
--- a/karbor/tests/unit/clients/test_k8s_client.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-from karbor.context import RequestContext
-from karbor.services.protection.clients import k8s
-from karbor.tests import base
-
-
-class KubernetesClientTest(base.TestCase):
-    def setUp(self):
-        super(KubernetesClientTest, self).setUp()
-        self._context = RequestContext(user_id='demo',
-                                       project_id='abcd',
-                                       auth_token='efgh',
-                                       service_catalog=None)
-
-        self.conf = cfg.ConfigOpts()
-        k8s.register_opts(self.conf)
-        self.host_url = 'https://192.168.98.35:6443'
-        self.conf.set_default('k8s_host',
-                              self.host_url,
-                              'k8s_client')
-        self.conf.set_override('k8s_ssl_ca_cert',
-                               '/etc/provider.d/server-ca.crt',
-                               'k8s_client')
-        self.conf.set_override('k8s_cert_file',
-                               '/etc/provider.d/client-admin.crt',
-                               'k8s_client')
-        self.conf.set_override('k8s_key_file',
-                               '/etc/provider.d/client-admin.key',
-                               'k8s_client')
-
-    def test_create_client(self):
-
-        client = k8s.create(self._context, self.conf)
-        self.assertEqual(client.api_client.configuration.host, self.host_url)
diff --git a/karbor/tests/unit/clients/test_manila_client.py b/karbor/tests/unit/clients/test_manila_client.py
deleted file mode 100644
index 360794e9..00000000
--- a/karbor/tests/unit/clients/test_manila_client.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from keystoneauth1 import session as keystone_session
-from oslo_config import cfg
-
-from karbor.context import RequestContext
-from karbor.services.protection.clients import manila
-from karbor.tests import base
-
-
-class ManilaClientTest(base.TestCase):
-    def setUp(self):
-        super(ManilaClientTest, self).setUp()
-
-        self._public_url = 'http://127.0.0.1:8776/v2/abcd'
-
-        service_catalog = [
-            {'type': 'sharev2',
-             'name': 'manilav2',
-             'endpoints': [{'publicURL': self._public_url}],
-             },
-        ]
-        self._context = RequestContext(user_id='demo',
-                                       project_id='abcd',
-                                       auth_token='efgh',
-                                       service_catalog=service_catalog)
-
-    @mock.patch('karbor.services.protection.clients.utils.get_url')
-    @mock.patch('manilaclient.client.Client')
-    def test_create_client(self, create, get_url):
-        get_url.return_value = self._public_url
-
-        client_version = manila.MANILACLIENT_VERSION
-
-        session = keystone_session.Session(auth=None)
-        manila.create(self._context, cfg.CONF, session=session)
-        create.assert_called_with(client_version,
-                                  endpoint_override=self._public_url,
-                                  session=session)
-
-    @mock.patch('karbor.services.protection.clients.utils.get_url')
-    @mock.patch('manilaclient.client.Client')
-    def test_create_client_without_session(self, create, get_url):
-        get_url.return_value = self._public_url
-
-        client_config = cfg.CONF[manila.CONFIG_GROUP]
-        client_version = manila.MANILACLIENT_VERSION
-        args = {
-            'input_auth_token': self._context.auth_token,
-            'project_id': self._context.project_id,
-            'service_catalog_url': self._public_url,
-            'cacert': client_config.manila_ca_cert_file,
-            'insecure': client_config.manila_auth_insecure,
-        }
-
-        manila.create(self._context, cfg.CONF)
-        create.assert_called_with(client_version, **args)
diff --git a/karbor/tests/unit/clients/test_neutron_client.py b/karbor/tests/unit/clients/test_neutron_client.py
deleted file mode 100644
index 980099b5..00000000
--- a/karbor/tests/unit/clients/test_neutron_client.py
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_config import cfg
-
-from karbor.context import RequestContext
-from karbor.services.protection.clients import neutron
-from karbor.tests import base
-
-
-class NeutronClientTest(base.TestCase):
-    def setUp(self):
-        super(NeutronClientTest, self).setUp()
-
-        self._public_url = 'http://127.0.0.1:9696'
-
-        service_catalog = [
-            {
-                'endpoints': [{'publicURL': self._public_url}],
-                'type': 'network',
-                'name': 'neutron',
-            },
-        ]
-        self._context = RequestContext(user_id='demo',
-                                       project_id='abcd',
-                                       auth_token='efgh',
-                                       service_catalog=service_catalog)
-
-    def test_create_client_by_endpoint(self):
-        cfg.CONF.set_default('neutron_endpoint',
-                             'http://127.0.0.1:9696',
-                             'neutron_client')
-        nc = neutron.create(self._context, cfg.CONF)
-        self.assertEqual('http://127.0.0.1:9696', nc.httpclient.endpoint_url)
-
-    def test_create_client_by_catalog(self):
-        nc = neutron.create(self._context, cfg.CONF)
-        self.assertEqual('http://127.0.0.1:9696', nc.httpclient.endpoint_url)
-
-    @mock.patch('karbor.services.protection.clients.utils.get_url')
-    @mock.patch('neutronclient.client.construct_http_client')
-    def test_create_client(self, create, get_url):
-        get_url.return_value = self._public_url
-
-        client_config = cfg.CONF[neutron.CONFIG_GROUP]
-        session = object()
-        args = {
-            'endpoint_url': self._public_url,
-            'token': self._context.auth_token,
-            'cacert': client_config.neutron_ca_cert_file,
-            'insecure': client_config.neutron_auth_insecure,
-        }
-
-        neutron.create(self._context, cfg.CONF)
-        create.assert_called_with(**args)
-
-        neutron.create(self._context, cfg.CONF, session=session)
-        create.assert_called_with(endpoint_override=self._public_url,
-                                  session=session)
diff --git a/karbor/tests/unit/clients/test_nova_client.py b/karbor/tests/unit/clients/test_nova_client.py
deleted file mode 100644
index c811b6fa..00000000
--- a/karbor/tests/unit/clients/test_nova_client.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystoneauth1 import session as keystone_session
-from oslo_config import cfg
-
-from karbor.context import RequestContext
-from karbor.services.protection.clients import nova
-from karbor.tests import base
-
-
-class NovaClientTest(base.TestCase):
-    def setUp(self):
-        super(NovaClientTest, self).setUp()
-        service_catalog = [
-            {'type': 'compute',
-             'name': 'nova',
-             'endpoints': [{'publicURL': 'http://127.0.0.1:8774/v2.1/abcd'}],
-             },
-        ]
-        self._context = RequestContext(user_id='demo',
-                                       project_id='abcd',
-                                       auth_token='efgh',
-                                       service_catalog=service_catalog)
-
-    def test_create_client_by_endpoint(self):
-        cfg.CONF.set_default('nova_endpoint',
-                             'http://127.0.0.1:8774/v2.1',
-                             'nova_client')
-
-        client = nova.create(self._context, cfg.CONF,
-                             session=keystone_session.Session(auth=None))
-        self.assertEqual('compute', client.client.service_type)
-        self.assertEqual('http://127.0.0.1:8774/v2.1/abcd',
-                         client.client.endpoint_override)
-
-    def test_create_client_by_catalog(self):
-        client = nova.create(self._context, cfg.CONF,
-                             session=keystone_session.Session(auth=None))
-        self.assertEqual('compute', client.client.service_type)
-        self.assertEqual('http://127.0.0.1:8774/v2.1/abcd',
-                         client.client.endpoint_override)
diff --git a/karbor/tests/unit/clients/test_swift_client.py b/karbor/tests/unit/clients/test_swift_client.py
deleted file mode 100644
index 744f527e..00000000
--- a/karbor/tests/unit/clients/test_swift_client.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-from karbor.context import RequestContext
-from karbor.services.protection.clients import swift
-from karbor.tests import base
-
-
-class SwiftClientTest(base.TestCase):
-    def setUp(self):
-        super(SwiftClientTest, self).setUp()
-        service_catalog = [
-            {
-                'endpoints': [
-                    {'publicURL': 'http://127.0.0.1:8080/v1/AUTH_abcd', }
-                ],
-                'type': 'object-store',
-                'name': 'swift',
-            },
-        ]
-        self._context = RequestContext(user_id='demo',
-                                       project_id='abcd',
-                                       auth_token='efgh',
-                                       service_catalog=service_catalog)
-
-        self.conf = cfg.ConfigOpts()
-        swift.register_opts(self.conf)
-
-    def test_create_client_by_keystone(self):
-        self.skipTest('test needs revision')
-        auth_url = 'http://127.0.0.1/identity/'
-        self.conf.set_default('swift_auth_url',
-                              auth_url,
-                              'swift_client')
-        self.conf.set_override('swift_user', 'demo', 'swift_client')
-        self.conf.set_override('swift_key', 'secrete', 'swift_client')
-        self.conf.set_override('swift_tenant_name', 'abcd', 'swift_client')
-        sc = swift.create(self._context, self.conf)
-        self.assertEqual(sc.authurl, auth_url)
-        self.assertEqual('demo', sc.user)
-        self.assertEqual('secrete', sc.key)
-        self.assertEqual('abcd', sc.os_options['tenant_name'])
diff --git a/karbor/tests/unit/clients/test_trove_client.py b/karbor/tests/unit/clients/test_trove_client.py
deleted file mode 100644
index d0727b40..00000000
--- a/karbor/tests/unit/clients/test_trove_client.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from keystoneauth1 import session as keystone_session
-from oslo_config import cfg
-
-from karbor.context import RequestContext
-from karbor.services.protection.clients import trove
-from karbor.tests import base
-
-
-class TroveClientTest(base.TestCase):
-    def setUp(self):
-        super(TroveClientTest, self).setUp()
-
-        self._public_url = 'http://127.0.0.1:8776/v2/abcd'
-
-        service_catalog = [
-            {'type': 'database',
-             'name': 'trove',
-             'endpoints': [{'publicURL': self._public_url}],
-             },
-        ]
-        self._context = RequestContext(user_id='demo',
-                                       project_id='abcd',
-                                       auth_token='efgh',
-                                       service_catalog=service_catalog)
-
-    @mock.patch('karbor.services.protection.clients.utils.get_url')
-    @mock.patch('troveclient.client.Client')
-    def test_create_client(self, create, get_url):
-        get_url.return_value = self._public_url
-
-        client_version = trove.TROVECLIENT_VERSION
-
-        session = keystone_session.Session(auth=None)
-        trove.create(self._context, cfg.CONF, session=session)
-        create.assert_called_with(client_version,
-                                  endpoint_override=self._public_url,
-                                  session=session)
-
-    @mock.patch('karbor.services.protection.clients.utils.get_url')
-    @mock.patch('troveclient.client.Client')
-    def test_create_client_without_session(self, create, get_url):
-        get_url.return_value = self._public_url
-
-        client_config = cfg.CONF[trove.CONFIG_GROUP]
-        client_version = trove.TROVECLIENT_VERSION
-        args = {
-            'input_auth_token': self._context.auth_token,
-            'project_id': self._context.project_id,
-            'service_catalog_url': self._public_url,
-            'cacert': client_config.trove_ca_cert_file,
-            'insecure': client_config.trove_auth_insecure,
-        }
-
-        trove.create(self._context, cfg.CONF)
-        create.assert_called_with(client_version, **args)
diff --git a/karbor/tests/unit/clients/test_utils.py b/karbor/tests/unit/clients/test_utils.py
deleted file mode 100644
index 390e4d94..00000000
--- a/karbor/tests/unit/clients/test_utils.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_config import cfg
-
-from karbor.common import karbor_keystone_plugin as kkp
-from karbor.context import RequestContext
-from karbor import exception
-from karbor.services.protection.clients import utils
-from karbor.tests import base
-
-
-class ClientUtilsTest(base.TestCase):
-    def setUp(self):
-        super(ClientUtilsTest, self).setUp()
-
-        self._service = ''
-        self._public_url = 'http://127.0.0.1:8776/v3/abcd'
-
-        service_catalog = [
-            {'type': 'volumev3',
-             'name': 'cinderv3',
-             'endpoints': [{'publicURL': self._public_url}],
-             },
-        ]
-        self._context = RequestContext(user_id='demo',
-                                       project_id='abcd',
-                                       auth_token='efgh',
-                                       service_catalog=service_catalog)
-
-    @mock.patch.object(cfg.ConfigOpts, '_get')
-    def test_get_url_by_endpoint(self, get_opt):
-        endpoint = 'http://127.0.0.1:8776/v3'
-        get_opt.return_value = endpoint
-
-        self.assertEqual(
-            endpoint, utils.get_url(self._service, self._context, cfg.CONF))
-
-        self.assertEqual(
-            endpoint + '/%s' % self._context.project_id,
-            utils.get_url(self._service, self._context, cfg.CONF,
-                          '%(url)s/%(project)s'))
-
-    @mock.patch.object(cfg.ConfigOpts, '_get')
-    def test_get_url_by_catalog(self, get_opt):
-        def _get_opt(name):
-            if name.find('catalog_info') >= 0:
-                return 'volumev3:cinderv3:publicURL'
-            return None
-        get_opt.side_effect = _get_opt
-
-        self.assertEqual(
-            self._public_url,
-            utils.get_url(self._service, self._context, cfg.CONF))
-
-    @mock.patch.object(kkp.KarborKeystonePlugin, 'get_service_endpoint')
-    def test_get_url_by_keystone_plugin(self, get_endpoint):
-        endpoint = "http://127.0.0.1:8776"
-        keystone_plugin = kkp.KarborKeystonePlugin()
-        get_endpoint.return_value = endpoint
-
-        config = mock.Mock()
-        config.test_service_endpoint = None
-        config.test_service_catalog_info = None
-        self.assertEqual(
-            endpoint,
-            utils.get_url('test_service', self._context, config,
-                          keystone_plugin=keystone_plugin))
-
-    @mock.patch.object(cfg.ConfigOpts, '_get')
-    def test_get_url_except(self, get_opt):
-        get_opt.return_value = None
-        self.assertRaisesRegex(exception.KarborException,
-                               "Couldn't find the endpoint of service.*",
-                               utils.get_url, self._service,
-                               self._context, cfg.CONF)
diff --git a/karbor/tests/unit/cmd/__init__.py b/karbor/tests/unit/cmd/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/karbor/tests/unit/cmd/test_status.py b/karbor/tests/unit/cmd/test_status.py
deleted file mode 100644
index 31deffc2..00000000
--- a/karbor/tests/unit/cmd/test_status.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (c) 2018 NEC, Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_upgradecheck.upgradecheck import Code
-
-from karbor.cmd import status
-from karbor.tests import base
-
-
-class TestUpgradeChecks(base.TestCase):
-
-    def setUp(self):
-        super(TestUpgradeChecks, self).setUp()
-        self.cmd = status.Checks()
-
-    def test__sample_check(self):
-        check_result = self.cmd._sample_check()
-        self.assertEqual(
-            Code.SUCCESS, check_result.code)
diff --git a/karbor/tests/unit/common/__init__.py b/karbor/tests/unit/common/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/karbor/tests/unit/common/test_karbor_keystone_plugin.py b/karbor/tests/unit/common/test_karbor_keystone_plugin.py
deleted file mode 100644
index caef1eb0..00000000
--- a/karbor/tests/unit/common/test_karbor_keystone_plugin.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_config import cfg
-from oslo_config import fixture
-
-from karbor.common import karbor_keystone_plugin
-from karbor.tests import base
-
-
-class KarborKeystonePluginTest(base.TestCase):
-
-    def setUp(self):
-        super(KarborKeystonePluginTest, self).setUp()
-        plugin_config = cfg.ConfigOpts()
-        plugin_config_fixture = self.useFixture(fixture.Config(plugin_config))
-        plugin_config_fixture.load_raw_values(
-            group='trustee',
-            poll_interval=0,
-        )
-        cfg.CONF.set_default('project_name',
-                             'services',
-                             "trustee")
-        self.kc_plugin = karbor_keystone_plugin.KarborKeystonePlugin()
-        self.kc_plugin.client.services.list = mock.MagicMock()
-        self.kc_plugin.client.endpoints.list = mock.MagicMock()
-        self.kc_plugin.client.services.list.return_value = (
-            'http://192.168.1.2:8799')
-
-    def test_get_service_endpoint_with_slash_end(self):
-        self.kc_plugin._auth_uri = 'http://192.168.1.1/identity/v3/'
-        self.kc_plugin.get_service_endpoint(
-            'karbor', 'data-protect', 'fake_region_id', 'public')
-        self.kc_plugin.client.services.list.assert_called_once_with(
-            name='karbor',
-            service_type='data-protect',
-            base_url='http://192.168.1.1/identity/v3'
-        )
-
-    def test_get_service_endpoint_with_no_slash_end(self):
-        self.kc_plugin._auth_uri = 'http://192.168.1.1/identity/v3'
-        self.kc_plugin.get_service_endpoint(
-            'karbor', 'data-protect', 'fake_region_id', 'public')
-        self.kc_plugin.client.services.list.assert_called_once_with(
-            name='karbor',
-            service_type='data-protect',
-            base_url='http://192.168.1.1/identity/v3'
-        )
-
-    def test_service_auth_plugin_with_project_name(self):
-        self.assertEqual(self.kc_plugin.service_auth_plugin._project_name,
-                         'services')
diff --git a/karbor/tests/unit/common/test_notification.py b/karbor/tests/unit/common/test_notification.py
deleted file mode 100644
index 46a87af8..00000000
--- a/karbor/tests/unit/common/test_notification.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""The notification module."""
-from unittest import mock
-from unittest.mock import patch
-
-from karbor.common import notification
-from karbor.common.notification import EndNotification
-from karbor.common.notification import StartNotification
-from karbor import context
-from karbor import exception
-from karbor import rpc
-from karbor.tests import base
-
-
-class TestEndNotification(base.TestCase):
-
-    def setUp(self):
-        super(TestEndNotification, self).setUp()
-        self.context = KarborTestContext(self)
-
-    def test_call(self):
-        with patch.object(self.context, "notification") as notification:
-            with EndNotification(self.context):
-                pass
-            self.assertTrue(notification.notify_end.called)
-
-    def server_exception(self, server_type):
-        with patch.object(self.context, "notification") as notification:
-            try:
-                with EndNotification(self.context):
-                    raise exception.InvalidInput
-            except Exception:
-                self.assertTrue(notification.notify_exc_info.called)
-
-
-class KarborTestContext(context.RequestContext):
-
-    def __init__(self, test_case, **kwargs):
-        super(KarborTestContext, self).__init__(user_id='demo',
-                                                project_id='abcd',
-                                                auth_token='efgh')
-        self.notification = KarborTestNotification(
-            self, request_id='req_id')
-
-
-class TestStartNotification(base.TestCase):
-
-    def setUp(self):
-        super(TestStartNotification, self).setUp()
-        self.context = KarborTestContext(self)
-
-    def test_call(self):
-        with patch.object(self.context, "notification") as notification:
-            with StartNotification(self.context):
-                pass
-            self.assertTrue(notification.notify_start.called)
-
-
-class KarborTestNotification(notification.KaborAPINotification):
-
-    def event_type(self):
-        return 'plan_test'
-
-    def required_start_traits(self):
-        return ['name']
-
-    def optional_start_traits(self):
-        return ['parameters']
-
-    def required_end_traits(self):
-        return ['name']
-
-
-class TestKarborNotification(base.TestCase):
-
-    def setUp(self):
-        super(TestKarborNotification, self).setUp()
-        self.test_n = KarborTestNotification(mock.Mock(), request=mock.Mock())
-
-    def test_missing_required_start_traits(self):
-        self.assertRaisesRegex(exception.InvalidInput,
-                               self.test_n.required_start_traits()[0],
-                               self.test_n.notify_start)
-
-    def test_invalid_start_traits(self):
-        self.assertRaisesRegex(exception.InvalidInput,
-                               "The following required keys",
-                               self.test_n.notify_start, foo='bar')
-
-    def test_missing_required_end_traits(self):
-        self.assertRaisesRegex(exception.InvalidInput,
-                               self.test_n.required_end_traits()[0],
-                               self.test_n.notify_end)
-
-    def test_invalid_end_traits(self):
-        self.assertRaisesRegex(exception.InvalidInput,
-                               "The following required keys",
-                               self.test_n.notify_end, foo='bar')
-
-    def test_missing_required_error_traits(self):
-        self.assertRaisesRegex(exception.InvalidInput,
-                               self.test_n.required_error_traits()[0],
-                               self.test_n._notify, 'error',
-                               self.test_n.required_error_traits(), [])
-
-    @patch.object(rpc, 'get_notifier')
-    def test_start_event(self, notifier):
-        self.test_n.notify_start(name='foo')
-        self.assertTrue(notifier().info.called)
-        a, _ = notifier().info.call_args
-        self.assertEqual('karbor.plan_test.start', a[1])
-
-    @patch.object(rpc, 'get_notifier')
-    def test_end_event(self, notifier):
-        self.test_n.notify_end(name='foo')
-        self.assertTrue(notifier().info.called)
-        a, _ = notifier().info.call_args
-        self.assertEqual('karbor.plan_test.end', a[1])
-
-    @patch.object(rpc, 'get_notifier')
-    def test_verify_base_values(self, notifier):
-        self.test_n.notify_start(name='foo')
-        self.assertTrue(notifier().info.called)
-        a, _ = notifier().info.call_args
-        payload = a[2]
-        self.assertIn('client_ip', payload)
-        self.assertIn('request_id', payload)
-        self.assertIn('tenant_id', payload)
-
-    @patch.object(rpc, 'get_notifier')
-    def test_verify_required_start_args(self, notifier):
-        self.test_n.notify_start(name='foo')
-        self.assertTrue(notifier().info.called)
-        a, _ = notifier().info.call_args
-        payload = a[2]
-        self.assertIn('name', payload)
-
-    @patch.object(rpc, 'get_notifier')
-    def test_verify_optional_start_args(self, notifier):
-        self.test_n.notify_start(name='foo', parameters="test")
-        self.assertTrue(notifier().info.called)
-        a, _ = notifier().info.call_args
-        payload = a[2]
-        self.assertIn('parameters', payload)
-
-    @patch.object(rpc, 'get_notifier')
-    def test_verify_required_end_args(self, notifier):
-        self.test_n.notify_end(name='foo')
-        self.assertTrue(notifier().info.called)
-        a, _ = notifier().info.call_args
-        payload = a[2]
-        self.assertIn('name', payload)
-
-    def _test_notify_callback(self, fn, *args, **kwargs):
-        with patch.object(rpc, 'get_notifier') as notifier:
-            mock_callback = mock.Mock()
-            self.test_n.register_notify_callback(mock_callback)
-            mock_context = mock.Mock()
-            mock_context.notification = mock.Mock()
-            self.test_n.context = mock_context
-            fn(*args, **kwargs)
-            self.assertTrue(notifier().info.called)
-            self.assertTrue(mock_callback.called)
-            self.test_n.register_notify_callback(None)
-
-    def test_notify_callback(self):
-        required_keys = {
-            'name': 'name',
-            'parameters': 'parameters',
-        }
-        self._test_notify_callback(self.test_n.notify_start,
-                                   **required_keys)
-        self._test_notify_callback(self.test_n.notify_end,
-                                   **required_keys)
-        self._test_notify_callback(self.test_n.notify_exc_info,
-                                   'error', 'exc')
diff --git a/karbor/tests/unit/conf_fixture.py b/karbor/tests/unit/conf_fixture.py
deleted file mode 100644
index 0c738fb1..00000000
--- a/karbor/tests/unit/conf_fixture.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from keystoneauth1 import loading
-from oslo_config import cfg
-
-
-CONF = cfg.CONF
-
-CONF.import_opt('policy_file', 'karbor.policy', group='oslo_policy')
-CONF.import_opt('provider_config_dir', 'karbor.services.protection.provider')
-
-
-def set_defaults(conf):
-    conf.set_default('connection', 'sqlite://', group='database')
-    conf.set_default('sqlite_synchronous', False, group='database')
-    conf.set_default('policy_dirs', [], group='oslo_policy')
-    conf.set_default('auth_strategy', 'noauth')
-    conf.set_default('state_path', os.path.abspath(
-        os.path.join(os.path.dirname(__file__), '..', '..', '..')))
-    conf.set_default('provider_config_dir',
-                     os.path.join(os.path.dirname(__file__), 'fake_providers'))
-    loading.register_auth_conf_options(conf, 'trustee')
-    opts = loading.get_auth_plugin_conf_options('password')
-    conf.register_opts(opts, 'trustee')
-    conf.set_default('auth_type', 'password', group='trustee')
-    conf.set_default('auth_section', None, group='trustee')
-    conf.set_default('auth_url', 'http://192.168.1.2/identity',
-                     group='trustee')
-    conf.set_default('username', 'karbor', group='trustee')
-    conf.set_default('password', 'password', group='trustee')
-    conf.set_default('user_domain_id', 'default', group='trustee')
-    conf.set_default('trigger_poll_interval', 1)
diff --git a/karbor/tests/unit/db/__init__.py b/karbor/tests/unit/db/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/karbor/tests/unit/db/test_models.py b/karbor/tests/unit/db/test_models.py
deleted file mode 100644
index d54303d5..00000000
--- a/karbor/tests/unit/db/test_models.py
+++ /dev/null
@@ -1,853 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -"""Tests for Models Database.""" - -from datetime import datetime -from datetime import timedelta -from oslo_config import cfg -from oslo_utils import uuidutils -import six -import uuid - -from karbor import context -from karbor import db -from karbor import exception -from karbor.tests import base - -from oslo_utils import timeutils - - -CONF = cfg.CONF - - -class ModelBaseTestCase(base.TestCase): - """Base Test cases which supplies assert Objects equal or not.""" - - def _dict_from_object(self, obj, ignored_keys): - if ignored_keys is None: - ignored_keys = [] - if isinstance(obj, dict): - items = obj.items() - else: - items = obj.iteritems() - return {k: v for k, v in items - if k not in ignored_keys} - - def _assertEqualObjects(self, obj1, obj2, ignored_keys=None): - obj1 = self._dict_from_object(obj1, ignored_keys) - obj2 = self._dict_from_object(obj2, ignored_keys) - - self.assertEqual( - len(obj1), len(obj2), - "Keys mismatch: %s" % six.text_type( - set(obj1.keys()) ^ set(obj2.keys()))) - for key, value in obj1.items(): - self.assertEqual(value, obj2[key]) - - -class ServicesDbTestCase(base.TestCase): - """Test cases for Services database table.""" - - def setUp(self): - super(ServicesDbTestCase, self).setUp() - self.ctxt = context.RequestContext(user_id='user_id', - project_id='project_id', - is_admin=True) - - def test_services_create(self): - service_ref = db.service_create(self.ctxt, - {'host': 'hosttest', - 'binary': 'binarytest', - 'topic': 'topictest', - 'report_count': 0}) - self.assertEqual('hosttest', service_ref['host']) - - def test_services_get(self): - service_ref = db.service_create(self.ctxt, - {'host': 'hosttest1', - 'binary': 'binarytest1', - 'topic': 'topictest1', - 'report_count': 0}) - - service_get_ref = db.service_get(self.ctxt, service_ref['id']) - self.assertEqual('hosttest1', service_ref['host']) - self.assertEqual('hosttest1', service_get_ref['host']) - - def test_service_destroy(self): - service_ref = db.service_create(self.ctxt, - {'host': 'hosttest2', - 'binary': 'binarytest2', - 'topic': 'topictest2', - 'report_count': 0}) - service_id = service_ref['id'] - db.service_destroy(self.ctxt, service_id) - self.assertRaises(exception.ServiceNotFound, db.service_get, - self.ctxt, service_id) - - def test_service_update(self): - service_ref = db.service_create(self.ctxt, - {'host': 'hosttest3', - 'binary': 'binarytest3', - 'topic': 'topictest3', - 'report_count': 0}) - service_id = service_ref['id'] - service_update_ref = db.service_update(self.ctxt, service_id, - {'host': 'hosttest4', - 'binary': 'binarytest4', - 'topic': 'topictest4', - 'report_count': 0}) - self.assertEqual('hosttest3', service_ref['host']) - self.assertEqual('hosttest4', service_update_ref['host']) - - def test_service_get_by_host_and_topic(self): - service_ref = db.service_create(self.ctxt, - {'host': 'hosttest5', - 'binary': 'binarytest5', - 'topic': 'topictest5', - 'report_count': 0}) - - service_get_ref = db.service_get_by_host_and_topic(self.ctxt, - 'hosttest5', - 'topictest5') - self.assertEqual('hosttest5', service_ref['host']) - self.assertEqual('hosttest5', service_get_ref['host']) - - -class TriggerTestCase(base.TestCase): - """Test cases for triggers table.""" - - def setUp(self): - super(TriggerTestCase, self).setUp() - self.ctxt = context.RequestContext(user_id='user_id', - project_id='project_id') - - def _create_trigger(self): - values = { - 'id': "0354ca9ddcd046b693340d78759fd274", - 'name': 'first trigger', - 'project_id': self.ctxt.tenant, - 'type': 'time', - 
-            'properties': '{}',
-        }
-        return db.trigger_create(self.ctxt, values)
-
-    def test_trigger_create(self):
-        trigger_ref = self._create_trigger()
-        self.assertEqual('time', trigger_ref['type'])
-
-    def test_trigger_delete(self):
-        trigger_ref = self._create_trigger()
-        db.trigger_delete(self.ctxt, trigger_ref['id'])
-
-        self.assertRaises(exception.TriggerNotFound,
-                          db.trigger_delete,
-                          self.ctxt, trigger_ref['id'])
-
-        self.assertRaises(exception.TriggerNotFound,
-                          db.trigger_get,
-                          self.ctxt, trigger_ref['id'])
-
-        self.assertRaises(exception.TriggerNotFound,
-                          db.trigger_delete, self.ctxt, '100')
-
-    def test_trigger_update(self):
-        trigger_ref = self._create_trigger()
-        id = trigger_ref['id']
-        trigger_ref = db.trigger_update(self.ctxt, id, {'type': 'event'})
-        self.assertEqual('event', trigger_ref['type'])
-
-        trigger_ref = db.trigger_get(self.ctxt, id)
-        self.assertEqual('event', trigger_ref['type'])
-
-        self.assertRaises(exception.TriggerNotFound,
-                          db.trigger_update,
-                          self.ctxt, '100', {"type": "event"})
-
-    def test_trigger_get(self):
-        trigger_ref = self._create_trigger()
-        trigger_ref = db.trigger_get(self.ctxt, trigger_ref['id'])
-        self.assertEqual('time', trigger_ref['type'])
-
-
-class ScheduledOperationTestCase(base.TestCase):
-    """Test cases for scheduled_operations table."""
-
-    def setUp(self):
-        super(ScheduledOperationTestCase, self).setUp()
-        self.ctxt = context.RequestContext(user_id='user_id',
-                                           project_id='project_id')
-
-    def _create_scheduled_operation(self):
-        values = {
-            'id': '0354ca9ddcd046b693340d78759fd274',
-            'name': 'protect vm',
-            'description': 'protect vm resource',
-            'operation_type': 'protect',
-            'user_id': self.ctxt.user_id,
-            'project_id': self.ctxt.tenant,
-            'trigger_id': '0354ca9ddcd046b693340d78759fd275',
-            'operation_definition': '{}'
-        }
-        return db.scheduled_operation_create(self.ctxt, values)
-
-    def test_scheduled_operation_create(self):
-        operation_ref = self._create_scheduled_operation()
-        self.assertEqual('protect', operation_ref['operation_type'])
-        self.assertTrue(operation_ref['enabled'])
-
-    def test_scheduled_operation_delete(self):
-        operation_ref = self._create_scheduled_operation()
-        db.scheduled_operation_delete(self.ctxt, operation_ref['id'])
-
-        self.assertRaises(exception.ScheduledOperationNotFound,
-                          db.scheduled_operation_delete,
-                          self.ctxt, operation_ref['id'])
-
-        self.assertRaises(exception.ScheduledOperationNotFound,
-                          db.scheduled_operation_get,
-                          self.ctxt, operation_ref['id'])
-
-        self.assertRaises(exception.ScheduledOperationNotFound,
-                          db.scheduled_operation_delete, self.ctxt, '100')
-
-    def test_scheduled_operation_update(self):
-        operation_ref = self._create_scheduled_operation()
-        id = operation_ref['id']
-        operation_ref = db.scheduled_operation_update(self.ctxt,
-                                                      id,
-                                                      {"name": "abc"})
-        self.assertEqual('abc', operation_ref['name'])
-
-        operation_ref = db.scheduled_operation_get(self.ctxt, id)
-        self.assertEqual('abc', operation_ref['name'])
-
-        self.assertRaises(exception.ScheduledOperationNotFound,
-                          db.scheduled_operation_update,
-                          self.ctxt, '100', {"name": "abc"})
-
-    def test_scheduled_operation_get(self):
-        operation_ref = self._create_scheduled_operation()
-        operation_ref = db.scheduled_operation_get(self.ctxt,
-                                                   operation_ref['id'])
-        self.assertEqual('protect', operation_ref['operation_type'])
-
-    def test_scheduled_operation_get_join_trigger(self):
-        def _create_trigger():
-            values = {
-                'id': "0354ca9ddcd046b693340d78759fd275",
-                'name': 'first trigger',
-                'project_id': self.ctxt.tenant,
-                'type': 'time',
-                'properties': '{}',
-            }
-            return db.trigger_create(self.ctxt, values)
-
-        trigger_ref = _create_trigger()
-        operation_ref = self._create_scheduled_operation()
-        operation_ref = db.scheduled_operation_get(
-            self.ctxt,
-            operation_ref['id'],
-            ['trigger'])
-        self.assertEqual('protect', operation_ref['operation_type'])
-        self.assertEqual(trigger_ref['type'], operation_ref.trigger['type'])
-
-
-class ScheduledOperationStateTestCase(base.TestCase):
-    """Test cases for scheduled_operation_states table."""
-
-    def setUp(self):
-        super(ScheduledOperationStateTestCase, self).setUp()
-        self.ctxt = context.RequestContext(user_id='user_id',
-                                           project_id='project_id')
-
-    def _create_scheduled_operation_state(self):
-        values = {
-            'operation_id': '0354ca9ddcd046b693340d78759fd274',
-            'service_id': 1,
-            'trust_id': '123',
-            'state': 'init',
-        }
-        return db.scheduled_operation_state_create(self.ctxt, values)
-
-    def test_scheduled_operation_state_create(self):
-        state_ref = self._create_scheduled_operation_state()
-        self.assertEqual('init', state_ref['state'])
-
-    def test_scheduled_operation_state_delete(self):
-        state_ref = self._create_scheduled_operation_state()
-        db.scheduled_operation_state_delete(self.ctxt,
-                                            state_ref['operation_id'])
-
-        self.assertRaises(exception.ScheduledOperationStateNotFound,
-                          db.scheduled_operation_state_delete,
-                          self.ctxt, state_ref['operation_id'])
-
-        self.assertRaises(exception.ScheduledOperationStateNotFound,
-                          db.scheduled_operation_state_get,
-                          self.ctxt, state_ref['operation_id'])
-
-        self.assertRaises(exception.ScheduledOperationStateNotFound,
-                          db.scheduled_operation_state_delete, self.ctxt, 100)
-
-    def test_scheduled_operation_state_update(self):
-        state_ref = self._create_scheduled_operation_state()
-        operation_id = state_ref['operation_id']
-        state_ref = db.scheduled_operation_state_update(self.ctxt,
-                                                        operation_id,
-                                                        {"state": "success"})
-        self.assertEqual('success', state_ref['state'])
-
-        state_ref = db.scheduled_operation_state_get(self.ctxt, operation_id)
-        self.assertEqual('success', state_ref['state'])
-
-        self.assertRaises(exception.ScheduledOperationStateNotFound,
-                          db.scheduled_operation_state_update,
-                          self.ctxt, '100', {"state": "success"})
-
-    def test_scheduled_operation_state_get(self):
-        state_ref = self._create_scheduled_operation_state()
-        state_ref = db.scheduled_operation_state_get(self.ctxt,
-                                                     state_ref['operation_id'])
-        self.assertEqual('init', state_ref['state'])
-
-    def test_scheduled_operation_state_get_join_operation(self):
-        def _create_scheduled_operation():
-            values = {
-                'id': '0354ca9ddcd046b693340d78759fd274',
-                'name': 'protect vm',
-                'operation_type': 'protect',
-                'user_id': self.ctxt.user_id,
-                'project_id': self.ctxt.tenant,
-                'trigger_id': '0354ca9ddcd046b693340d78759fd275',
-                'operation_definition': '{}'
-            }
-            return db.scheduled_operation_create(self.ctxt, values)
-
-        operation_ref = _create_scheduled_operation()
-        self._create_scheduled_operation_state()
-        state_ref = db.scheduled_operation_state_get(
-            self.ctxt,
-            operation_ref['id'],
-            ['operation'])
-        self.assertEqual(operation_ref['id'], state_ref.operation['id'])
-
-
-class ScheduledOperationLogTestCase(base.TestCase):
-    """Test cases for scheduled_operation_logs table."""
-
-    def setUp(self):
-        super(ScheduledOperationLogTestCase, self).setUp()
-        self.ctxt = context.get_admin_context()
-        self.operation_id = '0354ca9ddcd046b693340d78759fd274'
-
-    def _create_scheduled_operation_log(self, state='in_progress',
-                                        created_at=datetime.now()):
-        values = {
-            'operation_id': self.operation_id,
-            'state': state,
-            'created_at': created_at
-        }
-        return db.scheduled_operation_log_create(self.ctxt, values)
-
-    def test_scheduled_operation_log_create(self):
-        log_ref = self._create_scheduled_operation_log()
-        self.assertEqual('in_progress', log_ref['state'])
-
-    def test_scheduled_operation_log_delete(self):
-        log_ref = self._create_scheduled_operation_log()
-        db.scheduled_operation_log_delete(self.ctxt, log_ref['id'])
-
-        self.assertRaises(exception.ScheduledOperationLogNotFound,
-                          db.scheduled_operation_log_delete,
-                          self.ctxt, log_ref['id'])
-
-        self.assertRaises(exception.ScheduledOperationLogNotFound,
-                          db.scheduled_operation_log_get,
-                          self.ctxt, log_ref['id'])
-
-        self.assertRaises(exception.ScheduledOperationLogNotFound,
-                          db.scheduled_operation_log_delete,
-                          self.ctxt, 100)
-
-    def test_scheduled_operation_log_delete_oldest(self):
-        log_ids = []
-        states = ['success', 'in_progress', 'success', 'success']
-        for i in range(4):
-            t = datetime.now() + timedelta(hours=i)
-            log = self._create_scheduled_operation_log(
-                states[i], t)
-            log_ids.append(log['id'])
-
-        db.scheduled_operation_log_delete_oldest(
-            self.ctxt, self.operation_id, 3)
-        self.assertRaises(exception.ScheduledOperationLogNotFound,
-                          db.scheduled_operation_log_get,
-                          self.ctxt, log_ids[0])
-
-        db.scheduled_operation_log_delete_oldest(
-            self.ctxt, self.operation_id, 1, ['in_progress'])
-        log_ref = db.scheduled_operation_log_get(self.ctxt, log_ids[1])
-        self.assertEqual('in_progress', log_ref['state'])
-        self.assertRaises(exception.ScheduledOperationLogNotFound,
-                          db.scheduled_operation_log_get,
-                          self.ctxt, log_ids[2])
-
-    def test_scheduled_operation_log_update(self):
-        log_ref = self._create_scheduled_operation_log()
-        log_id = log_ref['id']
-        log_ref = db.scheduled_operation_log_update(self.ctxt,
-                                                    log_id,
-                                                    {"state": "success"})
-        self.assertEqual('success', log_ref['state'])
-
-        log_ref = db.scheduled_operation_log_get(self.ctxt, log_id)
-        self.assertEqual('success', log_ref['state'])
-
-        self.assertRaises(exception.ScheduledOperationLogNotFound,
-                          db.scheduled_operation_log_update,
-                          self.ctxt, 100, {"state": "success"})
-
-    def test_scheduled_operation_log_get(self):
-        log_ref = self._create_scheduled_operation_log()
-        log_ref = db.scheduled_operation_log_get(self.ctxt, log_ref['id'])
-        self.assertEqual('in_progress', log_ref['state'])
-
-
-class PlanDbTestCase(ModelBaseTestCase):
-    """Unit tests for karbor.db.api.plan_*."""
-
-    fake_plan = {
-        'name': 'My 3 tier application',
-        'description': 'My 3 tier application protection plan',
-        'provider_id': 'efc6a88b-9096-4bb6-8634-cda182a6e12a',
-        'status': 'suspended',
-        'project_id': '39bb894794b741e982bd26144d2949f6',
-        'resources': [],
-        'parameters': '{OS::Nova::Server: {consistency: os}}'
-    }
-
-    fake_plan_with_resources = {
-        'name': 'My 3 tier application',
-        'description': 'My 3 tier application protection plan',
-        'provider_id': 'efc6a88b-9096-4bb6-8634-cda182a6e12a',
-        'status': 'suspended',
-        'project_id': '39bb894794b741e982bd26144d2949f6',
-        'resources': [{
-            "id": "64e51e85-4f31-441f-9a5d-6e93e3196628",
-            "type": "OS::Nova::Server",
-            "name": "vm1"}],
-        'parameters': '{OS::Nova::Server: {consistency: os}}'
-    }
-
-    def setUp(self):
-        super(PlanDbTestCase, self).setUp()
-        self.ctxt = context.get_admin_context()
-
-    def test_plan_create(self):
-        plan = db.plan_create(self.ctxt, self.fake_plan)
-        self.assertTrue(uuidutils.is_uuid_like(plan['id']))
-        self.assertEqual('suspended', plan.status)
-
-    def test_plan_get(self):
-        plan = db.plan_create(self.ctxt,
-                              self.fake_plan)
-        self._assertEqualObjects(plan, db.plan_get(self.ctxt,
-                                                   plan['id']),
-                                 ignored_keys='resources')
-
-    def test_plan_destroy(self):
-        plan = db.plan_create(self.ctxt, self.fake_plan)
-        db.plan_destroy(self.ctxt, plan['id'])
-        self.assertRaises(exception.PlanNotFound, db.plan_get,
-                          self.ctxt, plan['id'])
-
-    def test_plan_update(self):
-        plan = db.plan_create(self.ctxt, self.fake_plan)
-        db.plan_update(self.ctxt, plan['id'],
-                       {'status': 'started'})
-        plan = db.plan_get(self.ctxt, plan['id'])
-        self.assertEqual('started', plan['status'])
-
-    def test_plan_update_nonexistent(self):
-        self.assertRaises(exception.PlanNotFound, db.plan_update,
-                          self.ctxt, 42, {})
-
-    def test_plan_resources_update(self):
-        resources2 = [{
-            "id": "61e51e85-4f31-441f-9a5d-6e93e3194444",
-            "type": "OS::Cinder::Volume",
-            "name": "vm2",
-            "extra_info": "{'availability_zone': 'az1'}"}]
-
-        plan = db.plan_create(self.ctxt, self.fake_plan)
-        db_meta = db.plan_resources_update(self.ctxt, plan["id"], resources2)
-
-        self.assertEqual("OS::Cinder::Volume", db_meta[0]["resource_type"])
-        self.assertEqual("vm2", db_meta[0]["resource_name"])
-        self.assertEqual("{'availability_zone': 'az1'}",
-                         db_meta[0]["resource_extra_info"])
-
-
-class RestoreDbTestCase(ModelBaseTestCase):
-    """Unit tests for karbor.db.api.restore_*."""
-
-    fake_restore = {
-        "id": "36ea41b2-c358-48a7-9117-70cb7617410a",
-        "project_id": "586cc6ce-e286-40bd-b2b5-dd32694d9944",
-        "provider_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c",
-        "checkpoint_id": "09edcbdc-d1c2-49c1-a212-122627b20968",
-        "restore_target": "192.168.1.2/identity/",
-        "parameters": "{'username': 'admin'}",
-        "status": "SUCCESS"
-    }
-
-    def setUp(self):
-        super(RestoreDbTestCase, self).setUp()
-        self.ctxt = context.get_admin_context()
-
-    def test_restore_create(self):
-        restore = db.restore_create(self.ctxt, self.fake_restore)
-        self.assertTrue(uuidutils.is_uuid_like(restore['id']))
-        self.assertEqual('SUCCESS', restore.status)
-
-    def test_restore_get(self):
-        restore = db.restore_create(self.ctxt,
-                                    self.fake_restore)
-        self._assertEqualObjects(restore, db.restore_get(self.ctxt,
-                                                         restore['id']))
-
-    def test_restore_destroy(self):
-        restore = db.restore_create(self.ctxt, self.fake_restore)
-        db.restore_destroy(self.ctxt, restore['id'])
-        self.assertRaises(exception.RestoreNotFound, db.restore_get,
-                          self.ctxt, restore['id'])
-
-    def test_restore_update(self):
-        restore = db.restore_create(self.ctxt, self.fake_restore)
-        db.restore_update(self.ctxt, restore['id'],
-                          {'status': 'INIT'})
-        restore = db.restore_get(self.ctxt, restore['id'])
-        self.assertEqual('INIT', restore['status'])
-
-    def test_restore_update_nonexistent(self):
-        self.assertRaises(exception.RestoreNotFound, db.restore_update,
-                          self.ctxt, 42, {})
-
-
-class VerificationDbTestCase(ModelBaseTestCase):
-    """Unit tests for karbor.db.api.verification_*."""
-
-    fake_verification = {
-        "id": "36ea41b2-c358-48a7-9117-70cb7617410a",
-        "project_id": "586cc6ce-e286-40bd-b2b5-dd32694d9944",
-        "provider_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c",
-        "checkpoint_id": "09edcbdc-d1c2-49c1-a212-122627b20968",
-        "parameters": "{'username': 'admin'}",
-        "status": "SUCCESS"
-    }
-
-    def setUp(self):
-        super(VerificationDbTestCase, self).setUp()
-        self.ctxt = context.get_admin_context()
-
-    def test_verification_create(self):
-        verification = db.verification_create(self.ctxt,
-                                              self.fake_verification)
-        self.assertTrue(uuidutils.is_uuid_like(verification['id']))
-        self.assertEqual('SUCCESS', verification.status)
-
-    def test_verification_get(self):
-        verification = db.verification_create(self.ctxt,
-                                              self.fake_verification)
-        self._assertEqualObjects(verification, db.verification_get(
-            self.ctxt, verification['id']))
-
-    def test_verification_destroy(self):
-        verification = db.verification_create(self.ctxt,
-                                              self.fake_verification)
-        db.verification_destroy(self.ctxt, verification['id'])
-        self.assertRaises(exception.VerificationNotFound,
-                          db.verification_get,
-                          self.ctxt, verification['id'])
-
-    def test_verification_update(self):
-        verification = db.verification_create(self.ctxt,
-                                              self.fake_verification)
-        db.verification_update(self.ctxt, verification['id'],
-                               {'status': 'INIT'})
-        verification = db.verification_get(self.ctxt, verification['id'])
-        self.assertEqual('INIT', verification['status'])
-
-    def test_verification_update_nonexistent(self):
-        self.assertRaises(exception.VerificationNotFound,
-                          db.verification_update,
-                          self.ctxt, 42, {})
-
-
-class OperationLogTestCase(ModelBaseTestCase):
-    """Unit tests for karbor.db.api.operation_log_*."""
-
-    fake_operation_log = {
-        "id": "36ea41b2-c358-48a7-9117-70cb7617410a",
-        "project_id": "586cc6ce-e286-40bd-b2b5-dd32694d9944",
-        "operation_type": "protect",
-        "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3",
-        "plan_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9",
-        "provider_id": "23902b02-5666-4ee6-8dfe-962ac09c3994",
-        "scheduled_operation_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c",
-        "status": "failed",
-        "error_info": "Could not access bank",
-        "extra_info": "[entries:{'timestamp': '2015-08-27T09:50:51-05:00',"
-                      "'message': 'Doing things'}]"
-    }
-
-    def setUp(self):
-        super(OperationLogTestCase, self).setUp()
-        self.ctxt = context.get_admin_context()
-
-    def test_operation_log_create(self):
-        operation_log = db.operation_log_create(self.ctxt,
-                                                self.fake_operation_log)
-        self.assertTrue(uuidutils.is_uuid_like(operation_log['id']))
-        self.assertEqual('failed', operation_log.status)
-
-    def test_operation_log_get(self):
-        operation_log = db.operation_log_create(self.ctxt,
-                                                self.fake_operation_log)
-        self._assertEqualObjects(operation_log, db.operation_log_get(
-            self.ctxt, operation_log['id']))
-
-    def test_operation_log_destroy(self):
-        operation_log = db.operation_log_create(self.ctxt,
-                                                self.fake_operation_log)
-        db.operation_log_destroy(self.ctxt, operation_log['id'])
-        self.assertRaises(exception.OperationLogNotFound, db.operation_log_get,
-                          self.ctxt, operation_log['id'])
-
-    def test_operation_log_update(self):
-        operation_log = db.operation_log_create(self.ctxt,
-                                                self.fake_operation_log)
-        db.operation_log_update(self.ctxt, operation_log['id'],
-                                {'status': 'finished'})
-        operation_log = db.operation_log_get(self.ctxt, operation_log['id'])
-        self.assertEqual('finished', operation_log['status'])
-
-    def test_operation_log_update_nonexistent(self):
-        self.assertRaises(exception.OperationLogNotFound,
-                          db.operation_log_update,
-                          self.ctxt, 42, {})
-
-
-class CheckpointRecordTestCase(ModelBaseTestCase):
-    """Unit tests for karbor.db.api.checkpoint_record_*."""
-
-    fake_checkpoint_record = {
-        "id": "36ea41b2-c358-48a7-9117-70cb7617410a",
-        "project_id": "586cc6ce-e286-40bd-b2b5-dd32694d9944",
-        "checkpoint_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c",
-        "checkpoint_status": "available",
-        "provider_id": "39bb894794b741e982bd26144d2949f6",
-        "plan_id": "efc6a88b-9096-4bb6-8634-cda182a6e12b",
-        "operation_id": "64e51e85-4f31-441f-9a5d-6e93e3196628",
-        "create_by": "operation-engine",
"operation-engine", - "extend_info": "[{" - "'id': '0354ca9d-dcd0-46b6-9334-0d78759fd275'," - "'type': 'OS::Nova::Server'," - "'name': 'vm1'" - "}]" - } - - def setUp(self): - super(CheckpointRecordTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def test_checkpoint_record_create(self): - checkpoint_record = db.checkpoint_record_create( - self.ctxt, - self.fake_checkpoint_record) - self.assertTrue(uuidutils.is_uuid_like(checkpoint_record['id'])) - self.assertEqual('available', checkpoint_record.checkpoint_status) - - def test_checkpoint_record_get(self): - checkpoint_record = db.checkpoint_record_create( - self.ctxt, - self.fake_checkpoint_record) - self._assertEqualObjects(checkpoint_record, db.checkpoint_record_get( - self.ctxt, checkpoint_record['id'])) - - def test_checkpoint_record_destroy(self): - checkpoint_record = db.checkpoint_record_create( - self.ctxt, - self.fake_checkpoint_record) - db.checkpoint_record_destroy(self.ctxt, checkpoint_record['id']) - self.assertRaises(exception.CheckpointRecordNotFound, - db.checkpoint_record_get, - self.ctxt, checkpoint_record['id']) - - def test_checkpoint_record_update(self): - checkpoint_record = db.checkpoint_record_create( - self.ctxt, - self.fake_checkpoint_record) - db.checkpoint_record_update(self.ctxt, - checkpoint_record['id'], - {'checkpoint_status': 'error'}) - checkpoint_record = db.checkpoint_record_get( - self.ctxt, - checkpoint_record['id']) - self.assertEqual('error', checkpoint_record['checkpoint_status']) - - def test_checkpoint_record_update_nonexistent(self): - self.assertRaises(exception.CheckpointRecordNotFound, - db.checkpoint_record_update, - self.ctxt, 42, {}) - - -class QuotaDbTestCase(ModelBaseTestCase): - """Unit tests for karbor.db.api.quota_*.""" - - def setUp(self): - super(QuotaDbTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.project_id = "586cc6ce-e286-40bd-b2b5-dd32694d9944" - self.resource = "volume_backups" - self.limit = 10 - - def test_quota_create(self): - quota = db.quota_create(self.ctxt, self.project_id, - self.resource, self.limit) - self.assertEqual("volume_backups", quota.resource) - db.quota_destroy(self.ctxt, self.project_id, self.resource) - - def test_quota_get(self): - quota = db.quota_create(self.ctxt, self.project_id, - self.resource, self.limit) - self._assertEqualObjects(quota, db.quota_get( - self.ctxt, quota.project_id, quota.resource)) - db.quota_destroy(self.ctxt, self.project_id, self.resource) - - def test_quota_destroy(self): - quota = db.quota_create(self.ctxt, self.project_id, - self.resource, self.limit) - db.quota_destroy(self.ctxt, quota.project_id, quota.resource) - self.assertRaises(exception.ProjectQuotaNotFound, db.quota_get, - self.ctxt, quota.project_id, quota.resource) - - def test_quota_update(self): - quota = db.quota_create(self.ctxt, self.project_id, - self.resource, self.limit) - db.quota_update(self.ctxt, quota.project_id, quota.resource, - 20) - quota = db.quota_get(self.ctxt, self.project_id, self.resource) - self.assertEqual(20, quota.hard_limit) - - -class QuotaClassDbTestCase(ModelBaseTestCase): - """Unit tests for karbor.db.api.quota_class_*.""" - - def setUp(self): - super(QuotaClassDbTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.class_name = "default" - self.resource = "volume_backups" - self.limit = 10 - - def test_quota_class_create(self): - quota_class = db.quota_class_create(self.ctxt, self.class_name, - self.resource, self.limit) - self.assertEqual("volume_backups", 
quota_class.resource) - db.quota_class_destroy(self.ctxt, self.class_name, self.resource) - - def test_quota_class_get(self): - quota_class = db.quota_class_create(self.ctxt, self.class_name, - self.resource, self.limit) - self._assertEqualObjects(quota_class, db.quota_class_get( - self.ctxt, quota_class.class_name, quota_class.resource)) - db.quota_class_destroy(self.ctxt, self.class_name, self.resource) - - def test_quota_class_destroy(self): - quota_class = db.quota_class_create(self.ctxt, self.class_name, - self.resource, self.limit) - db.quota_class_destroy(self.ctxt, self.class_name, self.resource) - self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get, - self.ctxt, quota_class.class_name, - quota_class.resource) - - def test_quota_class_update(self): - quota_class = db.quota_class_create(self.ctxt, self.class_name, - self.resource, self.limit) - db.quota_class_update(self.ctxt, quota_class.class_name, - quota_class.resource, 20) - quota_class = db.quota_class_get(self.ctxt, self.class_name, - self.resource) - self.assertEqual(20, quota_class.hard_limit) - - -class QuotaUsageDbTestCase(ModelBaseTestCase): - """Unit tests for karbor.db.api.quota_usage_*.""" - - def setUp(self): - super(QuotaUsageDbTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.project_id = "586cc6ce-e286-40bd-b2b5-dd32694d9944" - self.resource = "volume_backups" - self.in_use = 10 - self.reserved = 10 - self.until_refresh = 0 - - def test_quota_usage_create(self): - quota_usage = db.quota_usage_create( - self.ctxt, self.project_id, "volume_backups", self.in_use, - self.reserved, self.until_refresh) - self.assertEqual("volume_backups", quota_usage.resource) - - def test_quota_usage_get(self): - quota_usage = db.quota_usage_create( - self.ctxt, self.project_id, "volume_backups_get", self.in_use, - self.reserved, self.until_refresh) - self._assertEqualObjects(quota_usage, db.quota_usage_get( - self.ctxt, self.project_id, "volume_backups_get")) - - -class ReservationDbTestCase(ModelBaseTestCase): - """Unit tests for karbor.db.api.reservation_*.""" - - def setUp(self): - super(ReservationDbTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.project_id = "586cc6ce-e286-40bd-b2b5-dd32694d9944" - self.resource = "volume_backups" - self.in_use = 10 - self.reserved = 10 - self.until_refresh = 0 - - def test_reservation_create(self): - quota_usage = db.quota_usage_create( - self.ctxt, self.project_id, "volume_backups", self.in_use, - self.reserved, self.until_refresh) - reservation = db.reservation_create( - self.ctxt, str(uuid.uuid4()), quota_usage, - self.project_id, "volume_backups", 1, - timeutils.utcnow()) - self.assertEqual("volume_backups", quota_usage.resource) - self.assertEqual("volume_backups", reservation.resource) - - def test_reservation_get(self): - quota_usage = db.quota_usage_create( - self.ctxt, self.project_id, "volume_backups_get", self.in_use, - self.reserved, self.until_refresh) - reservation = db.reservation_create( - self.ctxt, str(uuid.uuid4()), quota_usage, - self.project_id, "volume_backups_get", 1, - timeutils.utcnow()) - self._assertEqualObjects(reservation, db.reservation_get( - self.ctxt, reservation.uuid)) diff --git a/karbor/tests/unit/db/test_purge.py b/karbor/tests/unit/db/test_purge.py deleted file mode 100644 index 53b748d1..00000000 --- a/karbor/tests/unit/db/test_purge.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (C) 2015 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for db purge.""" - -import datetime - -from oslo_db import exception as db_exc -from oslo_utils import timeutils -from oslo_utils import uuidutils -from sqlalchemy.dialects import sqlite - -from karbor import context -from karbor import db -from karbor.db.sqlalchemy import api as db_api -from karbor import exception -from karbor.tests import base - -from oslo_db.sqlalchemy import utils as sqlalchemyutils - - -class PurgeDeletedTest(base.TestCase): - - def setUp(self): - super(PurgeDeletedTest, self).setUp() - self.context = context.get_admin_context() - self.engine = db_api.get_engine() - self.session = db_api.get_session() - self.conn = self.engine.connect() - self.plans = sqlalchemyutils.get_table( - self.engine, "plans") - # The resources table has a FK of plans.id - self.resources = sqlalchemyutils.get_table( - self.engine, "resources") - - self.uuidstrs = [] - for unused in range(6): - self.uuidstrs.append(uuidutils.generate_uuid(dashed=False)) - # Add 6 rows to table - for uuidstr in self.uuidstrs: - ins_stmt = self.plans.insert().values(id=uuidstr) - self.conn.execute(ins_stmt) - ins_stmt = self.resources.insert().values(plan_id=uuidstr) - self.conn.execute(ins_stmt) - - # Set 4 of them deleted, 2 are 60 days ago, 2 are 20 days ago - old = timeutils.utcnow() - datetime.timedelta(days=20) - older = timeutils.utcnow() - datetime.timedelta(days=60) - make_plans_old = self.plans.update().where( - self.plans.c.id.in_(self.uuidstrs[1:3])).values( - deleted_at=old) - make_plans_older = self.plans.update().where( - self.plans.c.id.in_(self.uuidstrs[4:6])).values( - deleted_at=older) - make_resources_old = self.resources.update().where( - self.resources.c.plan_id.in_(self.uuidstrs[1:3])).values( - deleted_at=old) - make_resources_older = self.resources.update().where( - self.resources.c.plan_id.in_(self.uuidstrs[4:6])).values( - deleted_at=older) - - self.conn.execute(make_plans_old) - self.conn.execute(make_plans_older) - self.conn.execute(make_resources_old) - self.conn.execute(make_resources_older) - - def test_purge_deleted_rows_old(self): - dialect = self.engine.url.get_dialect() - if dialect == sqlite.dialect: - # We're seeing issues with foreign key support in SQLite 3.6.20 - # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 - # It works fine in SQLite 3.7. 
- # Force foreign_key checking if running SQLite >= 3.7 - import sqlite3 - tup = sqlite3.sqlite_version_info - if tup[0] > 3 or (tup[0] == 3 and tup[1] >= 7): - self.conn.execute("PRAGMA foreign_keys = ON") - # Purge at 30 days old, should only delete 2 rows - db.purge_deleted_rows(self.context, age_in_days=30) - plans_rows = self.session.query(self.plans).count() - resources_rows = self.session.query(self.resources).count() - # Verify that we only deleted 2 - self.assertEqual(4, plans_rows) - self.assertEqual(4, resources_rows) - - def test_purge_deleted_rows_older(self): - dialect = self.engine.url.get_dialect() - if dialect == sqlite.dialect: - # We're seeing issues with foreign key support in SQLite 3.6.20 - # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 - # It works fine in SQLite 3.7. - # Force foreign_key checking if running SQLite >= 3.7 - import sqlite3 - tup = sqlite3.sqlite_version_info - if tup[0] > 3 or (tup[0] == 3 and tup[1] >= 7): - self.conn.execute("PRAGMA foreign_keys = ON") - # Purge at 10 days old now, should delete 2 more rows - db.purge_deleted_rows(self.context, age_in_days=10) - - plans_rows = self.session.query(self.plans).count() - resources_rows = self.session.query(self.resources).count() - # Verify that we only have 2 rows now - self.assertEqual(2, plans_rows) - self.assertEqual(2, resources_rows) - - def test_purge_deleted_rows_bad_args(self): - # Test with no age argument - self.assertRaises(TypeError, db.purge_deleted_rows, self.context) - # Test purge with non-integer - self.assertRaises(exception.InvalidParameterValue, - db.purge_deleted_rows, self.context, - age_in_days='ten') - - def test_purge_deleted_rows_integrity_failure(self): - dialect = self.engine.url.get_dialect() - if dialect == sqlite.dialect: - # We're seeing issues with foreign key support in SQLite 3.6.20 - # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 - # It works fine in SQLite 3.7. - # So return early to skip this test if running SQLite < 3.7 - import sqlite3 - tup = sqlite3.sqlite_version_info - if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7): - self.skipTest( - 'sqlite version too old for reliable SQLA foreign_keys') - self.conn.execute("PRAGMA foreign_keys = ON") - - # add new entry in plans and resources for - # integrity check - uuid_str = uuidutils.generate_uuid(dashed=False) - ins_stmt = self.plans.insert().values(id=uuid_str) - self.conn.execute(ins_stmt) - ins_stmt = self.resources.insert().values( - plan_id=uuid_str) - self.conn.execute(ins_stmt) - - # set plans record to deleted 20 days ago - old = timeutils.utcnow() - datetime.timedelta(days=20) - make_old = self.plans.update().where( - self.plans.c.id.in_([uuid_str])).values(deleted_at=old) - self.conn.execute(make_old) - - # Verify that purge_deleted_rows fails due to Foreign Key constraint - self.assertRaises(db_exc.DBReferenceError, db.purge_deleted_rows, - self.context, age_in_days=10) diff --git a/karbor/tests/unit/fake_bank.py b/karbor/tests/unit/fake_bank.py deleted file mode 100644 index aa9b5390..00000000 --- a/karbor/tests/unit/fake_bank.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from karbor.services.protection import bank_plugin - -fake_bank_opts = [ - cfg.HostAddressOpt('fake_host'), -] - - -class FakeBankPlugin(bank_plugin.BankPlugin): - def __init__(self, config=None): - super(FakeBankPlugin, self).__init__(config) - config.register_opts(fake_bank_opts, 'fake_bank') - self.fake_host = config['fake_bank']['fake_host'] - - def update_object(self, key, value, context=None): - return - - def get_object(self, key, context=None): - return - - def list_objects(self, prefix=None, limit=None, - marker=None, sort_dir=None, context=None): - return - - def delete_object(self, key, context=None): - return - - def get_owner_id(self, context=None): - return diff --git a/karbor/tests/unit/fake_operation_log.py b/karbor/tests/unit/fake_operation_log.py deleted file mode 100644 index 922ad101..00000000 --- a/karbor/tests/unit/fake_operation_log.py +++ /dev/null @@ -1,45 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from karbor import objects - - -def fake_db_operation_log(**updates): - db_operation_log = { - "id": "36ea41b2-c358-48a7-9117-70cb7617410a", - "project_id": "586cc6ce-e286-40bd-b2b5-dd32694d9944", - "operation_type": "protect", - "checkpoint_id": "dcb20606-ad71-40a3-80e4-ef0fafdad0c3", - "plan_id": "cf56bd3e-97a7-4078-b6d5-f36246333fd9", - "provider_id": "23902b02-5666-4ee6-8dfe-962ac09c3994", - "scheduled_operation_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c", - "status": "failed", - "error_info": "Could not access bank", - "extra_info": "[entries:{'timestamp': '2015-08-27T09:50:51-05:00'," - "'message': 'Doing things'}]" - } - for name, field in objects.OperationLog.fields.items(): - if name in db_operation_log: - continue - if field.nullable: - db_operation_log[name] = None - elif field.default != fields.UnspecifiedDefault: - db_operation_log[name] = field.default - else: - raise Exception('db_operation_log needs help with %s.' % name) - - if updates: - db_operation_log.update(updates) - - return db_operation_log diff --git a/karbor/tests/unit/fake_plan.py b/karbor/tests/unit/fake_plan.py deleted file mode 100644 index cb2f8f44..00000000 --- a/karbor/tests/unit/fake_plan.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from karbor import objects - -db_plan = { - 'id': '1', - 'name': 'My 3 tier application', - 'provider_id': 'efc6a88b-9096-4bb6-8634-cda182a6e12a', - 'status': 'started', - 'project_id': '39bb894794b741e982bd26144d2949f6', - 'resources': [], - 'parameters': '{"OS::Nova::Server": {"consistency": "os"}}', -} - - -def fake_db_plan(**updates): - for name, field in objects.Plan.fields.items(): - if name in db_plan: - continue - if field.nullable: - db_plan[name] = None - elif field.default != fields.UnspecifiedDefault: - db_plan[name] = field.default - else: - raise Exception('fake_db_plan needs help with %s.' % name) - - if updates: - db_plan.update(updates) - - return db_plan diff --git a/karbor/tests/unit/fake_providers/fake_provider1.conf b/karbor/tests/unit/fake_providers/fake_provider1.conf deleted file mode 100644 index 4825d8ce..00000000 --- a/karbor/tests/unit/fake_providers/fake_provider1.conf +++ /dev/null @@ -1,13 +0,0 @@ -[provider] -name = fake_provider1 -id = fake_id1 -description = Test Provider 1 -bank = karbor.tests.unit.fake_bank.FakeBankPlugin -plugin = karbor.tests.unit.protection.fakes.FakeProtectionPlugin -enabled = True - -[fake_plugin] -fake_user = user - -[fake_bank] -fake_host = thor diff --git a/karbor/tests/unit/fake_providers/fake_provider2.conf b/karbor/tests/unit/fake_providers/fake_provider2.conf deleted file mode 100644 index 9fe502de..00000000 --- a/karbor/tests/unit/fake_providers/fake_provider2.conf +++ /dev/null @@ -1,5 +0,0 @@ -[provider] -name = fake_provider2 -id = fake_id2 -description = Test Provider 2 -enabled = True diff --git a/karbor/tests/unit/fake_restore.py b/karbor/tests/unit/fake_restore.py deleted file mode 100644 index 59fd7844..00000000 --- a/karbor/tests/unit/fake_restore.py +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
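
The fake_db_* factories in this test tree (fake_db_operation_log and fake_db_plan above, with more of them below) all share one pattern: start from a hand-written row, then backfill every remaining column from the versioned object's field schema. A hedged sketch of that pattern as a single reusable helper; the name fake_db_row and the generic signature are illustrative only, not part of the deleted code:

    from oslo_versionedobjects import fields


    def fake_db_row(obj_cls, base_row, **updates):
        # Copy the hand-written columns, then backfill the rest from the
        # object schema: nullable fields become None, defaulted fields take
        # their default, and anything else is a hard error so the factory
        # never silently returns an incomplete row.
        row = dict(base_row)
        for name, field in obj_cls.fields.items():
            if name in row:
                continue
            if field.nullable:
                row[name] = None
            elif field.default != fields.UnspecifiedDefault:
                row[name] = field.default
            else:
                raise Exception('fake_db_row needs help with %s.' % name)
        row.update(updates)
        return row
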
- -from oslo_versionedobjects import fields - -from karbor import objects - - -def fake_db_restore(**updates): - db_restore = { - "id": "36ea41b2-c358-48a7-9117-70cb7617410a", - "project_id": "586cc6ce-e286-40bd-b2b5-dd32694d9944", - "provider_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c", - "checkpoint_id": "09edcbdc-d1c2-49c1-a212-122627b20968", - "restore_target": "192.168.1.2/identity/", - "parameters": '{}', - "restore_auth": '{"type": "password", "username": "admin",' - '"password": "test" }', - "status": "SUCCESS" - } - for name, field in objects.Restore.fields.items(): - if name in db_restore: - continue - if field.nullable: - db_restore[name] = None - elif field.default != fields.UnspecifiedDefault: - db_restore[name] = field.default - else: - raise Exception('fake_db_restore needs help with %s.' % name) - - if updates: - db_restore.update(updates) - - return db_restore diff --git a/karbor/tests/unit/fake_service.py b/karbor/tests/unit/fake_service.py deleted file mode 100644 index c551f59c..00000000 --- a/karbor/tests/unit/fake_service.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import timeutils -from oslo_versionedobjects import fields - -from karbor import objects - - -def fake_db_service(**updates): - NOW = timeutils.utcnow().replace(microsecond=0) - db_service = { - 'created_at': NOW, - 'updated_at': None, - 'deleted_at': None, - 'deleted': False, - 'id': 123, - 'host': 'fake-host', - 'binary': 'fake-service', - 'topic': 'fake-service-topic', - 'report_count': 1, - 'disabled': False, - 'disabled_reason': None, - 'modified_at': NOW, - } - - for name, field in objects.Service.fields.items(): - if name in db_service: - continue - if field.nullable: - db_service[name] = None - elif field.default != fields.UnspecifiedDefault: - db_service[name] = field.default - else: - raise Exception('fake_db_service needs help with %s.' % name) - - if updates: - db_service.update(updates) - - return db_service - - -def fake_service_obj(context, **updates): - return objects.Service._from_db_object(context, objects.Service(), - fake_db_service(**updates)) diff --git a/karbor/tests/unit/fake_verification.py b/karbor/tests/unit/fake_verification.py deleted file mode 100644 index b06fe419..00000000 --- a/karbor/tests/unit/fake_verification.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
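
fake_service.py above is the one factory that also converts its row into a live versioned object via fake_service_obj. A brief usage sketch, runnable only against an installed karbor tree (which this change is removing); the host override is merged into the fake row before conversion, so a test can pin just the column it cares about:

    from karbor import context
    from karbor.tests.unit import fake_service

    ctxt = context.get_admin_context()
    # Keyword overrides flow through fake_db_service(**updates) first.
    service = fake_service.fake_service_obj(ctxt, host='other-host')
    assert service.host == 'other-host'
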
- -from oslo_versionedobjects import fields - -from karbor import objects - - -def fake_db_verification(**updates): - db_verification = { - "id": "36ea41b2-c358-48a7-9117-70cb7617410a", - "project_id": "586cc6ce-e286-40bd-b2b5-dd32694d9944", - "provider_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c", - "checkpoint_id": "09edcbdc-d1c2-49c1-a212-122627b20968", - "parameters": '{}', - "status": "SUCCESS" - } - for name, field in objects.Verification.fields.items(): - if name in db_verification: - continue - if field.nullable: - db_verification[name] = None - elif field.default != fields.UnspecifiedDefault: - db_verification[name] = field.default - else: - raise Exception('fake_db_verification needs help with %s.' % name) - - if updates: - db_verification.update(updates) - - return db_verification diff --git a/karbor/tests/unit/objects/__init__.py b/karbor/tests/unit/objects/__init__.py deleted file mode 100644 index 3d184593..00000000 --- a/karbor/tests/unit/objects/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import timeutils -from oslo_versionedobjects import fields - -from karbor import context -from karbor.objects import base as obj_base -from karbor.tests import base - - -class BaseObjectsTestCase(base.TestCase): - def setUp(self): - super(BaseObjectsTestCase, self).setUp() - self.user_id = 'fake-user' - self.project_id = 'fake-project' - self.context = context.RequestContext(self.user_id, self.project_id, - is_admin=False) - # We only test local right now. - self.assertIsNone(obj_base.KarborObject.indirection_api) - - @staticmethod - def _compare(test, db, obj): - for field, value in db.items(): - if not hasattr(obj, field): - continue - - if (isinstance(obj.fields[field], fields.DateTimeField) and - db[field]): - test.assertEqual(db[field], - timeutils.normalize_time(obj[field])) - elif isinstance(obj[field], obj_base.ObjectListBase): - test.assertEqual(db[field], obj[field].objects) - else: - test.assertEqual(db[field], obj[field]) diff --git a/karbor/tests/unit/objects/test_base.py b/karbor/tests/unit/objects/test_base.py deleted file mode 100644 index 988f698a..00000000 --- a/karbor/tests/unit/objects/test_base.py +++ /dev/null @@ -1,154 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
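
The DateTimeField branch of BaseObjectsTestCase._compare above leans on timeutils.normalize_time, which turns a tz-aware value into a naive UTC one so it can be compared with the naive datetime the DB hands back. A self-contained illustration of that conversion:

    import datetime

    from iso8601 import iso8601
    from oslo_utils import timeutils

    aware = iso8601.parse_date('2015-06-26T22:00:01Z')
    # normalize_time subtracts the UTC offset and strips tzinfo.
    naive = timeutils.normalize_time(aware)
    assert naive == datetime.datetime(2015, 6, 26, 22, 0, 1)
    assert naive.tzinfo is None
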
- -import datetime -from unittest import mock - -from iso8601 import iso8601 -from oslo_utils import uuidutils -from oslo_versionedobjects import fields - -from karbor import objects -from karbor.tests.unit import objects as test_objects - - -@objects.base.KarborObjectRegistry.register_if(False) -class TestObject(objects.base.KarborObject): - fields = { - 'scheduled_at': objects.base.fields.DateTimeField(nullable=True), - 'uuid': objects.base.fields.UUIDField(), - 'text': objects.base.fields.StringField(nullable=True), - } - - -class TestKarborObject(test_objects.BaseObjectsTestCase): - """Tests methods from KarborObject.""" - - def setUp(self): - super(TestKarborObject, self).setUp() - self.obj = TestObject( - scheduled_at=None, - uuid=uuidutils.generate_uuid(), - text='text') - self.obj.obj_reset_changes() - - def test_karbor_obj_get_changes_no_changes(self): - self.assertEqual({}, self.obj.karbor_obj_get_changes()) - - def test_karbor_obj_get_changes_other_changes(self): - self.obj.text = 'text2' - self.assertEqual({'text': 'text2'}, - self.obj.karbor_obj_get_changes()) - - def test_karbor_obj_get_changes_datetime_no_tz(self): - now = datetime.datetime.utcnow() - self.obj.scheduled_at = now - self.assertEqual({'scheduled_at': now}, - self.obj.karbor_obj_get_changes()) - - def test_karbor_obj_get_changes_datetime_tz_utc(self): - now_tz = iso8601.parse_date('2015-06-26T22:00:01Z') - now = now_tz.replace(tzinfo=None) - self.obj.scheduled_at = now_tz - self.assertEqual({'scheduled_at': now}, - self.obj.karbor_obj_get_changes()) - - def test_karbor_obj_get_changes_datetime_tz_non_utc_positive(self): - now_tz = iso8601.parse_date('2015-06-26T22:00:01+01') - now = now_tz.replace(tzinfo=None) - datetime.timedelta(hours=1) - self.obj.scheduled_at = now_tz - self.assertEqual({'scheduled_at': now}, - self.obj.karbor_obj_get_changes()) - - def test_karbor_obj_get_changes_datetime_tz_non_utc_negative(self): - now_tz = iso8601.parse_date('2015-06-26T10:00:01-05') - now = now_tz.replace(tzinfo=None) + datetime.timedelta(hours=5) - self.obj.scheduled_at = now_tz - self.assertEqual({'scheduled_at': now}, - self.obj.karbor_obj_get_changes()) - - def test_refresh(self): - @objects.base.KarborObjectRegistry.register_if(False) - class MyTestObject(objects.base.KarborObject, - objects.base.KarborObjectDictCompat, - objects.base.KarborComparableObject): - fields = {'id': fields.UUIDField(), - 'name': fields.StringField()} - - test_obj = MyTestObject(id='1', name='foo') - refresh_obj = MyTestObject(id='1', name='bar') - with mock.patch( - 'karbor.objects.base.KarborObject.get_by_id') as get_by_id: - get_by_id.return_value = refresh_obj - - test_obj.refresh() - self._compare(self, refresh_obj, test_obj) - - def test_refresh_no_id_field(self): - @objects.base.KarborObjectRegistry.register_if(False) - class MyTestObjectNoId(objects.base.KarborObject, - objects.base.KarborObjectDictCompat, - objects.base.KarborComparableObject): - fields = {'uuid': fields.UUIDField()} - - test_obj = MyTestObjectNoId(uuid='1', name='foo') - self.assertRaises(NotImplementedError, test_obj.refresh) - - -class TestKarborComparableObject(test_objects.BaseObjectsTestCase): - def test_comparable_objects(self): - @objects.base.KarborObjectRegistry.register - class MyComparableObj(objects.base.KarborObject, - objects.base.KarborObjectDictCompat, - objects.base.KarborComparableObject): - fields = {'foo': fields.Field(fields.Integer())} - - class NonVersionedObject(object): - pass - - obj1 = MyComparableObj(foo=1) - obj2 = 
MyComparableObj(foo=1) - obj3 = MyComparableObj(foo=2) - obj4 = NonVersionedObject() - self.assertTrue(obj1 == obj2) - self.assertFalse(obj1 == obj3) - self.assertFalse(obj1 == obj4) - self.assertNotEqual(obj1, None) - - -class TestKarborDictObject(test_objects.BaseObjectsTestCase): - @objects.base.KarborObjectRegistry.register_if(False) - class TestDictObject(objects.base.KarborObjectDictCompat, - objects.base.KarborObject): - obj_extra_fields = ['foo'] - - fields = { - 'abc': fields.StringField(nullable=True), - 'def': fields.IntegerField(nullable=True), - } - - @property - def foo(self): - return 42 - - def test_dict_objects(self): - obj = self.TestDictObject() - self.assertIsNone(obj.get('non_existing')) - self.assertEqual('val', obj.get('abc', 'val')) - self.assertIsNone(obj.get('abc')) - obj.abc = 'val2' - self.assertEqual('val2', obj.get('abc', 'val')) - self.assertEqual(42, obj.get('foo')) - - self.assertIn('foo', obj) - self.assertIn('abc', obj) - self.assertNotIn('def', obj) diff --git a/karbor/tests/unit/objects/test_checkpoint_record.py b/karbor/tests/unit/objects/test_checkpoint_record.py deleted file mode 100644 index 5b288626..00000000 --- a/karbor/tests/unit/objects/test_checkpoint_record.py +++ /dev/null @@ -1,96 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
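
The karbor_obj_get_changes tests in test_base.py above encode the timezone rule as arithmetic on the parsed value; the negative-offset case is the easiest to misread, so here is that single case spelled out with illustrative values:

    import datetime

    from iso8601 import iso8601

    # 10:00:01 at UTC-5 is 15:00:01 UTC, which is what
    # karbor_obj_get_changes() is expected to report, tz stripped.
    now_tz = iso8601.parse_date('2015-06-26T10:00:01-05')
    expected = now_tz.replace(tzinfo=None) + datetime.timedelta(hours=5)
    assert expected == datetime.datetime(2015, 6, 26, 15, 0, 1)
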
- -from unittest import mock - -from oslo_utils import timeutils - -from karbor import objects -from karbor.tests.unit import objects as test_objects - -NOW = timeutils.utcnow().replace(microsecond=0) - -CheckpointRecord_ID = '36ea41b2-c358-48a7-9117-70cb7617410a' - -Fake_CheckpointRecord = { - 'created_at': NOW, - 'deleted_at': None, - 'updated_at': NOW, - 'deleted': False, - "id": CheckpointRecord_ID, - "project_id": "586cc6ce-e286-40bd-b2b5-dd32694d9944", - "checkpoint_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c", - "checkpoint_status": "available", - "provider_id": "39bb894794b741e982bd26144d2949f6", - "plan_id": "efc6a88b-9096-4bb6-8634-cda182a6e12b", - "operation_id": "64e51e85-4f31-441f-9a5d-6e93e3196628", - "create_by": "operation-engine", - "extend_info": "[{" - "'id': '0354ca9d-dcd0-46b6-9334-0d78759fd275'," - "'type': 'OS::Nova::Server'," - "'name': 'vm1'" - "}]" -} - - -class TestCheckpointRecord(test_objects.BaseObjectsTestCase): - CheckpointRecord_Class = objects.CheckpointRecord - - @mock.patch('karbor.db.checkpoint_record_get') - def test_get_by_id(self, checkpoint_record_get): - db_checkpoint_record = Fake_CheckpointRecord.copy() - checkpoint_record_get.return_value = db_checkpoint_record - - checkpoint_record = self.CheckpointRecord_Class.get_by_id( - self.context, - CheckpointRecord_ID) - self._compare(self, db_checkpoint_record, checkpoint_record) - checkpoint_record_get.assert_called_once_with( - self.context, - CheckpointRecord_ID) - - @mock.patch('karbor.db.checkpoint_record_create') - def test_create(self, checkpoint_record_create): - db_checkpoint_record = Fake_CheckpointRecord.copy() - checkpoint_record_create.return_value = db_checkpoint_record - - checkpoint_record = self.CheckpointRecord_Class(context=self.context) - checkpoint_record.create() - self._compare(self, db_checkpoint_record, checkpoint_record) - checkpoint_record_create.assert_called_once_with(self.context, {}) - - @mock.patch('karbor.db.checkpoint_record_update') - def test_save(self, checkpoint_record_update): - db_checkpoint_record = Fake_CheckpointRecord - checkpoint_record = self.CheckpointRecord_Class._from_db_object( - self.context, - self.CheckpointRecord_Class(), - db_checkpoint_record) - checkpoint_record.checkpoint_status = 'error' - checkpoint_record.save() - - checkpoint_record_update.assert_called_once_with( - self.context, - checkpoint_record.id, - {'checkpoint_status': 'error'}) - - @mock.patch('karbor.db.checkpoint_record_destroy') - def test_destroy(self, checkpoint_record_destroy): - db_checkpoint_record = Fake_CheckpointRecord - checkpoint_record = self.CheckpointRecord_Class._from_db_object( - self.context, - self.CheckpointRecord_Class(), - db_checkpoint_record) - checkpoint_record.destroy() - checkpoint_record_destroy.assert_called_once_with( - self.context, - checkpoint_record.id) diff --git a/karbor/tests/unit/objects/test_operation_log.py b/karbor/tests/unit/objects/test_operation_log.py deleted file mode 100644 index 793cac8b..00000000 --- a/karbor/tests/unit/objects/test_operation_log.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2015 SimpliVity Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from karbor import objects -from karbor.tests.unit import fake_operation_log -from karbor.tests.unit import objects as test_objects - - -class TestOperationLog(test_objects.BaseObjectsTestCase): - @staticmethod - def _compare(test, db, obj): - db = {k: v for k, v in db.items()} - test_objects.BaseObjectsTestCase._compare(test, db, obj) - - @mock.patch('karbor.objects.OperationLog.get_by_id') - def test_get_by_id(self, operation_log_get): - db_operation_log = fake_operation_log.fake_db_operation_log() - operation_log_get.return_value = db_operation_log - operation_log = objects.OperationLog.get_by_id(self.context, "1") - operation_log_get.assert_called_once_with(self.context, "1") - self._compare(self, db_operation_log, operation_log) - - @mock.patch('karbor.db.sqlalchemy.api.operation_log_create') - def test_create(self, operation_log_create): - db_operation_log = fake_operation_log.fake_db_operation_log() - operation_log_create.return_value = db_operation_log - operation_log = objects.OperationLog(context=self.context) - operation_log.create() - self.assertEqual(db_operation_log['id'], operation_log.id) - - @mock.patch('karbor.db.sqlalchemy.api.operation_log_update') - def test_save(self, operation_log_update): - db_operation_log = fake_operation_log.fake_db_operation_log() - operation_log = objects.OperationLog._from_db_object( - self.context, objects.OperationLog(), db_operation_log) - operation_log.status = 'finished' - operation_log.save() - operation_log_update.assert_called_once_with( - self.context, operation_log.id, {'status': 'finished'}) - - @mock.patch('karbor.db.sqlalchemy.api.operation_log_destroy') - def test_destroy(self, operation_log_destroy): - db_operation_log = fake_operation_log.fake_db_operation_log() - operation_log = objects.OperationLog._from_db_object( - self.context, objects.OperationLog(), db_operation_log) - operation_log.destroy() - self.assertTrue(operation_log_destroy.called) - admin_context = operation_log_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - - def test_obj_field_status(self): - operation_log = objects.OperationLog(context=self.context, - status='finished') - self.assertEqual('finished', operation_log.status) diff --git a/karbor/tests/unit/objects/test_plan.py b/karbor/tests/unit/objects/test_plan.py deleted file mode 100644 index 3f10de11..00000000 --- a/karbor/tests/unit/objects/test_plan.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2015 SimpliVity Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
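
TestOperationLog above repeats the object-layer CRUD recipe used throughout this module: patch the DB call, mutate one field, and assert that save() pushes exactly that delta. A compressed sketch of the save path, reusing the document's own fixtures (runnable only inside the karbor test tree this change deletes):

    from unittest import mock

    from karbor import context
    from karbor import objects
    from karbor.tests.unit import fake_operation_log

    ctxt = context.get_admin_context()
    with mock.patch('karbor.db.sqlalchemy.api.operation_log_update') as upd:
        row = fake_operation_log.fake_db_operation_log()
        log = objects.OperationLog._from_db_object(
            ctxt, objects.OperationLog(), row)
        log.status = 'finished'
        # save() is expected to push only the changed column, keyed by id.
        log.save()
        upd.assert_called_once_with(ctxt, log.id, {'status': 'finished'})
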
- -from unittest import mock - -from oslo_serialization import jsonutils - -from karbor import objects -from karbor.tests.unit import fake_plan -from karbor.tests.unit import objects as test_objects - - -class TestPlan(test_objects.BaseObjectsTestCase): - @staticmethod - def _compare(test, db, obj): - db = {k: v for k, v in db.items() - if not k.endswith('resources')} - test_objects.BaseObjectsTestCase._compare(test, db, obj) - - @mock.patch('karbor.objects.Plan.get_by_id') - def test_get_by_id(self, plan_get): - db_plan = fake_plan.fake_db_plan() - plan_get.return_value = db_plan - plan = objects.Plan.get_by_id(self.context, "1") - plan_get.assert_called_once_with(self.context, "1") - self._compare(self, db_plan, plan) - - @mock.patch('karbor.db.sqlalchemy.api.plan_create') - def test_create(self, plan_create): - db_plan = fake_plan.fake_db_plan() - plan_create.return_value = db_plan - plan = objects.Plan(context=self.context) - plan.create() - self.assertEqual(db_plan['id'], plan.id) - - @mock.patch('karbor.db.sqlalchemy.api.plan_update') - def test_save(self, plan_update): - db_plan = fake_plan.fake_db_plan() - plan = objects.Plan._from_db_object(self.context, - objects.Plan(), db_plan) - plan.name = 'planname' - plan.save() - plan_update.assert_called_once_with(self.context, plan.id, - {'name': 'planname'}) - - @mock.patch('karbor.db.sqlalchemy.api.plan_resources_update', - return_value=[ - {'resource_id': 'key1', - "resource_type": "value1", - "extra_info": "{'availability_zone': 'az1'}"} - ]) - @mock.patch('karbor.db.sqlalchemy.api.plan_update') - def test_save_with_resource(self, plan_update, resource_update): - db_plan = fake_plan.fake_db_plan() - plan = objects.Plan._from_db_object(self.context, - objects.Plan(), db_plan) - plan.name = 'planname' - plan.resources = [{'id': 'key1', - "type": "value1", - "extra_info": "{'availability_zone': 'az1'}"}] - self.assertEqual({'name': 'planname', - 'resources': [{'id': 'key1', - "type": "value1", - "extra_info": - "{'availability_zone': 'az1'}"}]}, - plan.obj_get_changes()) - plan.save() - plan_update.assert_called_once_with(self.context, plan.id, - {'name': 'planname'}) - resource_update.assert_called_once_with( - self.context, plan.id, [{'id': 'key1', "type": "value1", - "extra_info": - "{'availability_zone': 'az1'}"}]) - - @mock.patch('karbor.db.sqlalchemy.api.plan_destroy') - def test_destroy(self, plan_destroy): - db_plan = fake_plan.fake_db_plan() - plan = objects.Plan._from_db_object(self.context, - objects.Plan(), db_plan) - plan.destroy() - self.assertTrue(plan_destroy.called) - admin_context = plan_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - - def test_parameters(self): - db_plan = fake_plan.fake_db_plan() - plan = objects.Plan._from_db_object(self.context, - objects.Plan(), db_plan) - self.assertEqual(plan.parameters, - jsonutils.loads(fake_plan.db_plan['parameters'])) - - def test_obj_fields(self): - plan = objects.Plan(context=self.context, id="2", name="testname") - self.assertEqual(['plan_resources'], plan.obj_extra_fields) - self.assertEqual('testname', plan.name) - self.assertEqual('2', plan.id) - - def test_obj_field_status(self): - plan = objects.Plan(context=self.context, - status='suspending') - self.assertEqual('suspending', plan.status) diff --git a/karbor/tests/unit/objects/test_restore.py b/karbor/tests/unit/objects/test_restore.py deleted file mode 100644 index b52a7797..00000000 --- a/karbor/tests/unit/objects/test_restore.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2015 SimpliVity 
Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from karbor import objects -from karbor.tests.unit import fake_restore -from karbor.tests.unit import objects as test_objects - - -class TestRestore(test_objects.BaseObjectsTestCase): - @staticmethod - def _compare(test, db, obj): - db = {k: v for k, v in db.items()} - test_objects.BaseObjectsTestCase._compare(test, db, obj) - - @mock.patch('karbor.objects.Restore.get_by_id') - def test_get_by_id(self, restore_get): - db_restore = fake_restore.fake_db_restore() - restore_get.return_value = db_restore - restore = objects.Restore.get_by_id(self.context, "1") - restore_get.assert_called_once_with(self.context, "1") - self._compare(self, db_restore, restore) - - @mock.patch('karbor.db.sqlalchemy.api.restore_create') - def test_create(self, restore_create): - db_restore = fake_restore.fake_db_restore() - restore_create.return_value = db_restore - restore = objects.Restore(context=self.context) - restore.create() - self.assertEqual(db_restore['id'], restore.id) - - @mock.patch('karbor.db.sqlalchemy.api.restore_update') - def test_save(self, restore_update): - db_restore = fake_restore.fake_db_restore() - restore = objects.Restore._from_db_object( - self.context, objects.Restore(), db_restore) - restore.status = 'FAILED' - restore.save() - restore_update.assert_called_once_with(self.context, restore.id, - {'status': 'FAILED'}) - - @mock.patch('karbor.db.sqlalchemy.api.restore_destroy') - def test_destroy(self, restore_destroy): - db_restore = fake_restore.fake_db_restore() - restore = objects.Restore._from_db_object( - self.context, objects.Restore(), db_restore) - restore.destroy() - self.assertTrue(restore_destroy.called) - admin_context = restore_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - - def test_obj_field_status(self): - restore = objects.Restore(context=self.context, - status='FAILED') - self.assertEqual('FAILED', restore.status) diff --git a/karbor/tests/unit/objects/test_scheduled_operation.py b/karbor/tests/unit/objects/test_scheduled_operation.py deleted file mode 100644 index 3640d897..00000000 --- a/karbor/tests/unit/objects/test_scheduled_operation.py +++ /dev/null @@ -1,114 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
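
Both TestPlan and TestRestore above depend on JSON-in-text columns (parameters, restore_auth, operation_definition): the DB stores a string, the object layer exposes a dict. The contract is a plain jsonutils round trip:

    from oslo_serialization import jsonutils

    raw = '{"OS::Nova::Server": {"consistency": "os"}}'
    params = jsonutils.loads(raw)
    assert params['OS::Nova::Server']['consistency'] == 'os'
    # Serialization is symmetric, so the object can be stored back unchanged.
    assert jsonutils.loads(jsonutils.dumps(params)) == params
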
- -from unittest import mock - -from oslo_serialization import jsonutils -from oslo_utils import timeutils - -from karbor import objects -from karbor.tests.unit import objects as test_objects - -NOW = timeutils.utcnow().replace(microsecond=0) - -Operation_ID = '0354ca9ddcd046b693340d78759fd274' - -Fake_Operation = { - 'created_at': NOW, - 'deleted_at': None, - 'updated_at': NOW, - 'deleted': False, - 'id': Operation_ID, - 'name': 'protect vm', - 'description': 'protect vm resource', - 'operation_type': 'protect', - 'user_id': '123', - 'project_id': '123', - 'trigger_id': '0354ca9ddcd046b693340d78759fd275', - 'operation_definition': '{}' -} - - -class TestScheduledOperation(test_objects.BaseObjectsTestCase): - Operation_Class = objects.ScheduledOperation - - @mock.patch('karbor.db.scheduled_operation_get') - def test_get_by_id(self, operation_get): - db_op = Fake_Operation.copy() - operation_get.return_value = db_op - - op = self.Operation_Class.get_by_id(self.context, Operation_ID) - db_op['operation_definition'] = jsonutils.loads( - db_op['operation_definition']) - self._compare(self, db_op, op) - operation_get.assert_called_once_with(self.context, Operation_ID, []) - - @mock.patch('karbor.db.scheduled_operation_get') - def test_get_join_trigger(self, operation_get): - db_op = Fake_Operation.copy() - db_op['trigger'] = { - 'created_at': NOW, - 'deleted_at': None, - 'updated_at': NOW, - 'deleted': False, - 'id': '123', - 'name': 'daily', - 'project_id': '123', - 'type': 'time', - 'properties': '{}' - } - operation_get.return_value = db_op - - op = self.Operation_Class.get_by_id(self.context, - Operation_ID, ['trigger']) - db_op['operation_definition'] = jsonutils.loads( - db_op['operation_definition']) - self.assertEqual(db_op['trigger']['type'], op.trigger.type) - operation_get.assert_called_once_with(self.context, - Operation_ID, ['trigger']) - - @mock.patch('karbor.db.scheduled_operation_create') - def test_create(self, operation_create): - db_op = Fake_Operation.copy() - operation_create.return_value = db_op - - op = self.Operation_Class(context=self.context) - op.create() - db_op['operation_definition'] = jsonutils.loads( - db_op['operation_definition']) - self._compare(self, db_op, op) - operation_create.assert_called_once_with(self.context, {}) - - @mock.patch('karbor.db.scheduled_operation_update') - def test_save(self, operation_update): - db_op = Fake_Operation - op = self.Operation_Class._from_db_object(self.context, - self.Operation_Class(), - db_op) - fake_op_def = {'a': '1'} - op.name = 'protect volume' - op.operation_definition = fake_op_def - op.save() - - operation_update.assert_called_once_with( - self.context, op.id, - {'name': 'protect volume', - 'operation_definition': jsonutils.dumps(fake_op_def)}) - - @mock.patch('karbor.db.scheduled_operation_delete') - def test_destroy(self, operation_delete): - db_op = Fake_Operation - op = self.Operation_Class._from_db_object(self.context, - self.Operation_Class(), - db_op) - op.destroy() - operation_delete.assert_called_once_with(self.context, op.id) diff --git a/karbor/tests/unit/objects/test_scheduled_operation_log.py b/karbor/tests/unit/objects/test_scheduled_operation_log.py deleted file mode 100644 index fb04e9e5..00000000 --- a/karbor/tests/unit/objects/test_scheduled_operation_log.py +++ /dev/null @@ -1,112 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_utils import timeutils - -from karbor import exception -from karbor import objects -from karbor.tests.unit import objects as test_objects - -NOW = timeutils.utcnow().replace(microsecond=0) - -Log_ID = 0 - -Fake_Log = { - 'created_at': NOW, - 'deleted_at': None, - 'updated_at': NOW, - 'deleted': False, - 'id': Log_ID, - 'operation_id': '123', - 'expect_start_time': NOW, - 'triggered_time': NOW, - 'actual_start_time': NOW, - 'end_time': NOW, - 'state': 'in_progress', - 'extend_info': '', -} - - -class TestScheduledOperationLog(test_objects.BaseObjectsTestCase): - def setUp(self): - super(TestScheduledOperationLog, self).setUp() - - self.log_class = objects.ScheduledOperationLog - self.db_log = Fake_Log - - @mock.patch('karbor.db.scheduled_operation_log_get') - def test_get_by_id(self, log_get): - log_get.return_value = self.db_log - - log = self.log_class.get_by_id(self.context, Log_ID) - self._compare(self, self.db_log, log) - log_get.assert_called_once_with(self.context, Log_ID) - - def test_get_by_no_existing_id(self): - self.assertRaises(exception.ScheduledOperationLogNotFound, - self.log_class.get_by_id, - self.context, Log_ID) - - @mock.patch('karbor.db.scheduled_operation_log_create') - def test_create(self, log_create): - log_create.return_value = self.db_log - - log = self.log_class(context=self.context) - log.create() - self._compare(self, self.db_log, log) - log_create.assert_called_once_with(self.context, {}) - - self.assertRaises(exception.ObjectActionError, - log.create) - - @mock.patch('karbor.db.scheduled_operation_log_update') - def test_save(self, log_update): - log = self.log_class._from_db_object(self.context, - self.log_class(), - self.db_log) - log.state = 'success' - log.save() - - log_update.assert_called_once_with(self.context, - log.id, - {'state': 'success'}) - - @mock.patch('karbor.db.scheduled_operation_log_delete') - def test_destroy(self, log_delete): - log = self.log_class._from_db_object(self.context, - self.log_class(), - self.db_log) - log.destroy() - log_delete.assert_called_once_with(self.context, log.id) - - -class TestScheduledOperationLogList(test_objects.BaseObjectsTestCase): - - def test_get_by_filters(self): - log = self._create_operation_log('123') - - logs = objects.ScheduledOperationLogList.get_by_filters( - self.context, {'state': ['in_progress']}) - self.assertEqual(1, len(logs.objects)) - log1 = logs.objects[0] - self.assertEqual(log.id, log1.id) - - def _create_operation_log(self, operation_id): - log_info = { - 'operation_id': operation_id, - 'state': 'in_progress', - } - log = objects.ScheduledOperationLog(self.context, **log_info) - log.create() - return log diff --git a/karbor/tests/unit/objects/test_scheduled_operation_state.py b/karbor/tests/unit/objects/test_scheduled_operation_state.py deleted file mode 100644 index 3a72e4ea..00000000 --- a/karbor/tests/unit/objects/test_scheduled_operation_state.py +++ /dev/null @@ -1,165 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_utils import timeutils - -from karbor import context -from karbor import objects -from karbor.tests.unit import objects as test_objects - -NOW = timeutils.utcnow().replace(microsecond=0) - -Operation_ID = '0354ca9ddcd046b693340d78759fd274' - -Fake_State = { - 'created_at': NOW, - 'deleted_at': None, - 'updated_at': NOW, - 'deleted': False, - 'id': 1, - 'operation_id': Operation_ID, - 'service_id': 2, - 'trust_id': '123', - 'state': 'triggered', -} - - -class TestScheduledOperationState(test_objects.BaseObjectsTestCase): - State_Class = objects.ScheduledOperationState - - @mock.patch('karbor.db.scheduled_operation_state_get') - def test_get_by_operation_id(self, state_get): - db_state = Fake_State - state_get.return_value = db_state - state = self.State_Class.get_by_operation_id(self.context, - Operation_ID) - self._compare(self, db_state, state) - state_get.assert_called_once_with(self.context, Operation_ID, []) - - @mock.patch('karbor.db.scheduled_operation_state_create') - def test_create(self, state_create): - db_state = Fake_State - state_create.return_value = db_state - state = self.State_Class(context=self.context) - state.create() - self._compare(self, db_state, state) - state_create.assert_called_once_with(self.context, {}) - - @mock.patch('karbor.db.scheduled_operation_state_update') - def test_save(self, state_update): - db_state = Fake_State - state = self.State_Class._from_db_object(self.context, - self.State_Class(), - db_state) - state.state = 'triggered' - state.save() - - state_update.assert_called_once_with(self.context, - state.operation_id, - {'state': 'triggered'}) - - @mock.patch('karbor.db.scheduled_operation_state_delete') - def test_destroy(self, state_delete): - db_state = Fake_State - state = self.State_Class._from_db_object(self.context, - self.State_Class(), - db_state) - state.destroy() - state_delete.assert_called_once_with(self.context, - state.operation_id) - - def test_get_state_and_operation(self): - ctx = context.get_admin_context() - service, trigger, operation, state = FakeEnv(ctx).do_init() - - state_obj = self.State_Class.get_by_operation_id( - self.context, operation.id, ['operation']) - - self.assertEqual(operation.id, state_obj.operation.id) - - -class TestScheduledOperationStateList(test_objects.BaseObjectsTestCase): - - def setUp(self): - super(TestScheduledOperationStateList, self).setUp() - self.context = context.get_admin_context() - - def test_get_by_filters(self): - service, trigger, operation, state = FakeEnv(self.context).do_init() - states = objects.ScheduledOperationStateList.get_by_filters( - self.context, {'service_id': service.id}, - columns_to_join=['operation']) - self.assertEqual(1, len(states.objects)) - state1 = states.objects[0] - self.assertEqual(state.id, state1.id) - self.assertEqual(operation.id, state1.operation.id) - - -class FakeEnv(object): - - def __init__(self, ctx): - super(FakeEnv, self).__init__() - self.context = ctx - - def do_init(self): - service = self._create_service() - trigger = self._create_trigger() - operation = self._create_operation(trigger.id) - state = 
self._create_operation_state(operation.id, service.id) - return service, trigger, operation, state - - def _create_service(self): - service_info = { - 'host': "abc", - 'binary': 'karbor-operationengine' - } - service = objects.Service(self.context, **service_info) - service.create() - return service - - def _create_trigger(self): - trigger_info = { - 'name': 'daily', - 'project_id': '123', - 'type': 'time', - 'properties': {} - } - trigger = objects.Trigger(self.context, **trigger_info) - trigger.create() - return trigger - - def _create_operation(self, trigger_id): - operation_info = { - 'name': 'protect vm', - 'description': 'protect vm resource', - 'operation_type': 'protect', - 'user_id': '123', - 'project_id': '123', - 'trigger_id': trigger_id, - 'operation_definition': {} - } - operation = objects.ScheduledOperation(self.context, **operation_info) - operation.create() - return operation - - def _create_operation_state(self, operation_id, service_id): - state_info = { - 'operation_id': operation_id, - 'service_id': service_id, - 'trust_id': '123', - 'state': 'triggered', - } - state = objects.ScheduledOperationState(self.context, **state_info) - state.create() - return state diff --git a/karbor/tests/unit/objects/test_service.py b/karbor/tests/unit/objects/test_service.py deleted file mode 100644 index 5936c2ff..00000000 --- a/karbor/tests/unit/objects/test_service.py +++ /dev/null @@ -1,111 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
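
FakeEnv above chains Service, Trigger, ScheduledOperation, and State together precisely so the join-load can be asserted. A hedged usage sketch against the same classes; FakeEnv is the helper defined just above, and create() here writes to the unit-test database, so this only runs inside the karbor test harness:

    from karbor import context
    from karbor import objects

    ctx = context.get_admin_context()
    service, trigger, operation, state = FakeEnv(ctx).do_init()
    # columns_to_join makes the list API eagerly attach the related
    # operation object instead of returning bare state rows.
    states = objects.ScheduledOperationStateList.get_by_filters(
        ctx, {'service_id': service.id}, columns_to_join=['operation'])
    assert states.objects[0].operation.id == operation.id
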
- -from unittest import mock - -from karbor import objects -from karbor.tests.unit import fake_service -from karbor.tests.unit import objects as test_objects - - -class TestService(test_objects.BaseObjectsTestCase): - - @mock.patch('karbor.db.sqlalchemy.api.service_get') - def test_get_by_id(self, service_get): - db_service = fake_service.fake_db_service() - service_get.return_value = db_service - service = objects.Service.get_by_id(self.context, 1) - self._compare(self, db_service, service) - service_get.assert_called_once_with(self.context, 1) - - @mock.patch('karbor.db.service_get_by_host_and_topic') - def test_get_by_host_and_topic(self, service_get_by_host_and_topic): - db_service = fake_service.fake_db_service() - service_get_by_host_and_topic.return_value = db_service - service = objects.Service.get_by_host_and_topic( - self.context, 'fake-host', 'fake-topic') - self._compare(self, db_service, service) - service_get_by_host_and_topic.assert_called_once_with( - self.context, 'fake-host', 'fake-topic') - - @mock.patch('karbor.db.service_get_by_args') - def test_get_by_args(self, service_get_by_args): - db_service = fake_service.fake_db_service() - service_get_by_args.return_value = db_service - service = objects.Service.get_by_args( - self.context, 'fake-host', 'fake-key') - self._compare(self, db_service, service) - service_get_by_args.assert_called_once_with( - self.context, 'fake-host', 'fake-key') - - @mock.patch('karbor.db.service_create') - def test_create(self, service_create): - db_service = fake_service.fake_db_service() - service_create.return_value = db_service - service = objects.Service(context=self.context) - service.create() - self.assertEqual(db_service['id'], service.id) - service_create.assert_called_once_with(self.context, {}) - - @mock.patch('karbor.db.service_update') - def test_save(self, service_update): - db_service = fake_service.fake_db_service() - service = objects.Service._from_db_object( - self.context, objects.Service(), db_service) - service.topic = 'foobar' - service.save() - service_update.assert_called_once_with(self.context, service.id, - {'topic': 'foobar'}) - - @mock.patch('karbor.db.service_destroy') - def test_destroy(self, service_destroy): - db_service = fake_service.fake_db_service() - service = objects.Service._from_db_object( - self.context, objects.Service(), db_service) - with mock.patch.object(service._context, 'elevated') as elevated_ctx: - service.destroy() - service_destroy.assert_called_once_with(elevated_ctx(), 123) - - -class TestServiceList(test_objects.BaseObjectsTestCase): - @mock.patch('karbor.db.service_get_all') - def test_get_all(self, service_get_all): - db_service = fake_service.fake_db_service() - service_get_all.return_value = [db_service] - - services = objects.ServiceList.get_all(self.context, 'foo') - service_get_all.assert_called_once_with(self.context, 'foo') - self.assertEqual(1, len(services)) - TestService._compare(self, db_service, services[0]) - - @mock.patch('karbor.db.service_get_all_by_topic') - def test_get_all_by_topic(self, service_get_all_by_topic): - db_service = fake_service.fake_db_service() - service_get_all_by_topic.return_value = [db_service] - - services = objects.ServiceList.get_all_by_topic( - self.context, 'foo', 'bar') - service_get_all_by_topic.assert_called_once_with( - self.context, 'foo', disabled='bar') - self.assertEqual(1, len(services)) - TestService._compare(self, db_service, services[0]) - - @mock.patch('karbor.db.service_get_all_by_args') - def test_get_all_by_args(self, 
service_get_all_by_args): - db_service = fake_service.fake_db_service() - service_get_all_by_args.return_value = [db_service] - services = objects.ServiceList.get_all_by_args( - self.context, 'fake-host', 'fake-service') - service_get_all_by_args.assert_called_once_with( - self.context, 'fake-host', 'fake-service') - self.assertEqual(1, len(services)) - TestService._compare(self, db_service, services[0]) diff --git a/karbor/tests/unit/objects/test_trigger.py b/karbor/tests/unit/objects/test_trigger.py deleted file mode 100644 index 5d7c98b2..00000000 --- a/karbor/tests/unit/objects/test_trigger.py +++ /dev/null @@ -1,82 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_serialization import jsonutils -from oslo_utils import timeutils - -from karbor import objects -from karbor.tests.unit import objects as test_objects - -NOW = timeutils.utcnow().replace(microsecond=0) - -Trigger_ID = '0354ca9ddcd046b693340d78759fd274' - -Fake_Trigger = { - 'created_at': NOW, - 'deleted_at': None, - 'updated_at': NOW, - 'deleted': False, - 'id': Trigger_ID, - 'name': 'daily', - 'project_id': '123', - 'type': 'time', - 'properties': '{}' -} - - -class TestTrigger(test_objects.BaseObjectsTestCase): - Trigger_Class = objects.Trigger - - @mock.patch('karbor.db.trigger_get') - def test_get_by_id(self, trigger_get): - db_trigger = Fake_Trigger.copy() - trigger_get.return_value = db_trigger - - trigger = self.Trigger_Class.get_by_id(self.context, Trigger_ID) - db_trigger['properties'] = jsonutils.loads(db_trigger['properties']) - self._compare(self, db_trigger, trigger) - trigger_get.assert_called_once_with(self.context, Trigger_ID) - - @mock.patch('karbor.db.trigger_create') - def test_create(self, trigger_create): - db_trigger = Fake_Trigger.copy() - trigger_create.return_value = db_trigger - - trigger = self.Trigger_Class(context=self.context) - trigger.create() - db_trigger['properties'] = jsonutils.loads(db_trigger['properties']) - self._compare(self, db_trigger, trigger) - trigger_create.assert_called_once_with(self.context, {}) - - @mock.patch('karbor.db.trigger_update') - def test_save(self, trigger_update): - db_trigger = Fake_Trigger - trigger = self.Trigger_Class._from_db_object(self.context, - self.Trigger_Class(), - db_trigger) - trigger.name = 'weekly' - trigger.save() - - trigger_update.assert_called_once_with(self.context, - trigger.id, - {'name': 'weekly'}) - - @mock.patch('karbor.db.trigger_delete') - def test_destroy(self, trigger_delete): - db_trigger = Fake_Trigger - trigger = self.Trigger_Class._from_db_object(self.context, - self.Trigger_Class(), - db_trigger) - trigger.destroy() - trigger_delete.assert_called_once_with(self.context, trigger.id) diff --git a/karbor/tests/unit/objects/test_verification.py b/karbor/tests/unit/objects/test_verification.py deleted file mode 100644 index a828fb86..00000000 --- a/karbor/tests/unit/objects/test_verification.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2015 SimpliVity Corp. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from karbor import objects -from karbor.tests.unit import fake_verification -from karbor.tests.unit import objects as test_objects - - -class TestVerification(test_objects.BaseObjectsTestCase): - @staticmethod - def _compare(test, db, obj): - db = {k: v for k, v in db.items()} - test_objects.BaseObjectsTestCase._compare(test, db, obj) - - @mock.patch('karbor.db.sqlalchemy.api.verification_get') - def test_get_by_id(self, verification_get): - db_verification = fake_verification.fake_db_verification() - verification_get.return_value = db_verification - verification = objects.Verification.get_by_id(self.context, "1") - verification_get.assert_called_once_with(self.context, "1") - self._compare(self, db_verification, verification) - - @mock.patch('karbor.db.sqlalchemy.api.verification_create') - def test_create(self, verification_create): - db_verification = fake_verification.fake_db_verification() - verification_create.return_value = db_verification - verification = objects.Verification(context=self.context) - verification.create() - self.assertEqual(db_verification['id'], verification.id) - - @mock.patch('karbor.db.sqlalchemy.api.verification_update') - def test_save(self, verification_update): - db_verification = fake_verification.fake_db_verification() - verification = objects.Verification._from_db_object( - self.context, objects.Verification(), db_verification) - verification.status = 'FAILED' - verification.save() - verification_update.assert_called_once_with( - self.context, verification.id, {'status': 'FAILED'}) - - @mock.patch('karbor.db.sqlalchemy.api.verification_destroy') - def test_destroy(self, verification_destroy): - db_verification = fake_verification.fake_db_verification() - verification = objects.Verification._from_db_object( - self.context, objects.Verification(), db_verification) - verification.destroy() - self.assertTrue(verification_destroy.called) - admin_context = verification_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - - def test_obj_field_status(self): - verification = objects.Verification(context=self.context, - status='FAILED') - self.assertEqual('FAILED', verification.status) diff --git a/karbor/tests/unit/operationengine/__init__.py b/karbor/tests/unit/operationengine/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/operationengine/engine/__init__.py b/karbor/tests/unit/operationengine/engine/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/operationengine/engine/executors/__init__.py b/karbor/tests/unit/operationengine/engine/executors/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/operationengine/engine/executors/test_green_thread_executor.py b/karbor/tests/unit/operationengine/engine/executors/test_green_thread_executor.py deleted file mode 100644 index 0f6c30ba..00000000 --- 
a/karbor/tests/unit/operationengine/engine/executors/test_green_thread_executor.py +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet - -from datetime import datetime -from datetime import timedelta - -from karbor.common import constants -from karbor import context -from karbor import objects -from karbor.services.operationengine.engine.executors import \ - green_thread_executor -from karbor.tests import base - - -class FakeOperationManager(object): - def __init__(self): - super(FakeOperationManager, self).__init__() - self._op_id = 0 - - def run_operation(self, operation_type, operation_definition, **kwargs): - self._op_id = kwargs['param']['operation_id'] - return - - -class GreenThreadExecutorTestCase(base.TestCase): - - def setUp(self): - super(GreenThreadExecutorTestCase, self).setUp() - - self._operation_manager = FakeOperationManager() - self._executor = green_thread_executor.GreenThreadExecutor( - self._operation_manager) - self.context = context.get_admin_context() - - operation = self._create_operation() - self._create_operation_state(operation.id, 0) - self._op_id = operation.id - - def tearDown(self): - self._executor.shutdown() - super(GreenThreadExecutorTestCase, self).tearDown() - - def test_execute_operation(self): - now = datetime.utcnow() - window_time = 30 - self._executor.execute_operation(self._op_id, now, now, window_time) - - self.assertIn(self._op_id, self._executor._operation_thread_map) - - eventlet.sleep(1) - - self.assertTrue(not self._executor._operation_thread_map) - - self.assertEqual(self._op_id, self._operation_manager._op_id) - self._operation_manager._op_id = '' - - state = objects.ScheduledOperationState.get_by_operation_id( - self.context, self._op_id) - self.assertIsNotNone(state.end_time_for_run) - self.assertEqual(constants.OPERATION_STATE_REGISTERED, state.state) - - def test_resume_operation(self): - now = datetime.utcnow() - window_time = 30 - self._executor.resume_operation(self._op_id, end_time_for_run=( - now + timedelta(seconds=window_time))) - - self.assertIn(self._op_id, self._executor._operation_thread_map) - - eventlet.sleep(1) - - self.assertTrue(not self._executor._operation_thread_map) - - self.assertEqual(self._op_id, self._operation_manager._op_id) - self._operation_manager._op_id = '' - - state = objects.ScheduledOperationState.get_by_operation_id( - self.context, self._op_id) - self.assertEqual(constants.OPERATION_STATE_REGISTERED, state.state) - - def test_cancel_operation(self): - now = datetime.utcnow() - window_time = 30 - self._executor.execute_operation(self._op_id, now, now, window_time) - - self.assertIn(self._op_id, self._executor._operation_thread_map) - - self._executor.cancel_operation(self._op_id) - - self.assertTrue(not self._operation_manager._op_id) - - eventlet.sleep(1) - - self.assertTrue(not self._operation_manager._op_id) - - def _create_operation(self, trigger_id='123'): - operation_info = { - 'name': 'protect vm', - 'operation_type': 'protect', - 'user_id': '123', - 
'project_id': '123', - 'trigger_id': trigger_id, - 'operation_definition': {} - } - operation = objects.ScheduledOperation(self.context, **operation_info) - operation.create() - return operation - - def _create_operation_state(self, operation_id, service_id): - state_info = { - 'operation_id': operation_id, - 'service_id': service_id, - 'trust_id': '123', - 'state': constants.OPERATION_STATE_INIT, - } - state = objects.ScheduledOperationState(self.context, **state_info) - state.create() - return state diff --git a/karbor/tests/unit/operationengine/engine/executors/test_thread_pool_executor.py b/karbor/tests/unit/operationengine/engine/executors/test_thread_pool_executor.py deleted file mode 100644 index 2b1f15ae..00000000 --- a/karbor/tests/unit/operationengine/engine/executors/test_thread_pool_executor.py +++ /dev/null @@ -1,113 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from datetime import datetime -from datetime import timedelta -import time - -from karbor.common import constants -from karbor import context -from karbor import objects -from karbor.services.operationengine.engine.executors import \ - thread_pool_executor -from karbor.tests import base - - -class FakeOperationManager(object): - def run_operation(self, operation_type, operation_definition, **kwargs): - return - - -class ThreadPoolExecutorTestCase(base.TestCase): - - def setUp(self): - super(ThreadPoolExecutorTestCase, self).setUp() - - self._operation_manager = FakeOperationManager() - self._executor = thread_pool_executor.ThreadPoolExecutor( - self._operation_manager) - self.context = context.get_admin_context() - - def tearDown(self): - super(ThreadPoolExecutorTestCase, self).tearDown() - self._executor.shutdown() - - def test_execute_operation(self): - operation = self._create_operation() - self._create_operation_state(operation.id, 0) - - now = datetime.utcnow() - window_time = 30 - - self._executor.execute_operation(operation.id, now, now, window_time) - - time.sleep(1) - - self.assertEqual(0, len(self._executor._operation_to_run)) - - state = objects.ScheduledOperationState.get_by_operation_id( - self.context, operation.id) - self.assertIsNotNone(state.end_time_for_run) - self.assertEqual(constants.OPERATION_STATE_REGISTERED, state.state) - - def test_resume_operation(self): - operation = self._create_operation() - self._create_operation_state(operation.id, 0) - - now = datetime.utcnow() - window_time = 30 - - self._executor.resume_operation(operation.id, end_time_for_run=( - now + timedelta(seconds=window_time))) - - time.sleep(1) - - self.assertEqual(0, len(self._executor._operation_to_run)) - - state = objects.ScheduledOperationState.get_by_operation_id( - self.context, operation.id) - self.assertEqual(constants.OPERATION_STATE_REGISTERED, state.state) - - def test_cancel_operation(self): - operation_id = '123' - - self._executor.cancel_operation(operation_id) - self.assertEqual(0, len(self._executor._operation_to_cancel)) - - self._executor._operation_to_run[operation_id] = 0 - 
self._executor.cancel_operation(operation_id) - self.assertEqual(1, len(self._executor._operation_to_cancel)) - - def _create_operation(self, trigger_id='123'): - operation_info = { - 'name': 'protect vm', - 'description': 'protect vm resource', - 'operation_type': 'protect', - 'user_id': '123', - 'project_id': '123', - 'trigger_id': trigger_id, - 'operation_definition': {} - } - operation = objects.ScheduledOperation(self.context, **operation_info) - operation.create() - return operation - - def _create_operation_state(self, operation_id, service_id): - state_info = { - 'operation_id': operation_id, - 'service_id': service_id, - 'trust_id': '123', - 'state': constants.OPERATION_STATE_INIT, - } - state = objects.ScheduledOperationState(self.context, **state_info) - state.create() - return state diff --git a/karbor/tests/unit/operationengine/engine/triggers/__init__.py b/karbor/tests/unit/operationengine/engine/triggers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/operationengine/engine/triggers/test_trigger_manager.py b/karbor/tests/unit/operationengine/engine/triggers/test_trigger_manager.py deleted file mode 100644 index 4fc7c5d0..00000000 --- a/karbor/tests/unit/operationengine/engine/triggers/test_trigger_manager.py +++ /dev/null @@ -1,170 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from stevedore import extension as import_driver - -from karbor import exception -from karbor.services.operationengine.engine.executors import base as base_exe -from karbor.services.operationengine.engine import triggers -from karbor.services.operationengine.engine.triggers import trigger_manager -from karbor.tests import base - - -class FakeTrigger(triggers.BaseTrigger): - def __init__(self, trigger_id, trigger_property, executor): - super(FakeTrigger, self).__init__(trigger_id, trigger_property, - executor) - self._ops = set() - - def shutdown(self): - pass - - def register_operation(self, operation_id, **kwargs): - self._ops.add(operation_id) - - def unregister_operation(self, operation_id, **kwargs): - self._ops.discard(operation_id) - - def update_trigger_property(self, trigger_property): - pass - - @classmethod - def check_trigger_definition(cls, trigger_definition): - pass - - @classmethod - def check_configuration(cls): - pass - - def has_operations(self): - return bool(self._ops) - - -class FakeExecutor(base_exe.BaseExecutor): - def execute_operation(self, operation_id, triggered_time, - expect_start_time, window_time, **kwargs): - pass - - def resume_operation(self, operation_id, **kwargs): - pass - - def cancel_operation(self, operation_id): - pass - - def shutdown(self): - pass - - @classmethod - def obj(cls): - return cls - - @classmethod - def name(cls): - return "FakeExecutor" - - -class TriggerManagerTestCase(base.TestCase): - - def setUp(self): - super(TriggerManagerTestCase, self).setUp() - - with mock.patch.object(import_driver.ExtensionManager, - '_load_plugins') as load_plugin: - load_plugin.return_value = [FakeExecutor] - - self._executor = FakeExecutor(None) - self._manager = trigger_manager.TriggerManager(self._executor) - self._trigger_type = 'fake' - self._manager._trigger_cls_map[self._trigger_type] = FakeTrigger - - def tearDown(self): - self._manager.shutdown() - super(TriggerManagerTestCase, self).tearDown() - - @mock.patch.object(FakeTrigger, 'check_trigger_definition') - def test_check_trigger_definition(self, func): - self._manager.check_trigger_definition(self._trigger_type, {}) - func.assert_called_once_with({}) - - def test_add_trigger(self): - trigger_id = 'add' - self._add_a_trigger(trigger_id) - self.assertRaisesRegex(exception.InvalidInput, - 'Trigger id.* is exist', - self._manager.add_trigger, - trigger_id, self._trigger_type, {}) - - self.assertRaisesRegex(exception.InvalidInput, - 'Invalid trigger type.*', - self._manager.add_trigger, - 1, 'abc', {}) - - def test_remove_trigger(self): - self.assertRaises(exception.TriggerNotFound, - self._manager.remove_trigger, - 1) - trigger_id = 'remove' - op_id = 1 - self._add_a_trigger(trigger_id) - self._manager.register_operation(trigger_id, op_id) - - self.assertRaises(exception.DeleteTriggerNotAllowed, - self._manager.remove_trigger, - trigger_id) - - self._manager.unregister_operation(trigger_id, op_id) - self._manager.remove_trigger(trigger_id) - self.assertRaises(exception.TriggerNotFound, - self._manager.remove_trigger, - trigger_id) - - @mock.patch.object(FakeTrigger, 'update_trigger_property') - def test_update_trigger(self, func): - self.assertRaises(exception.TriggerNotFound, - self._manager.update_trigger, - 1, {}) - - trigger_id = 'update' - self._add_a_trigger(trigger_id) - self._manager.update_trigger(trigger_id, {}) - func.assert_called_once_with({}) - - @mock.patch.object(FakeTrigger, 'register_operation') - @mock.patch.object(FakeExecutor, 
'resume_operation') - def test_register_operation(self, resume, register): - self.assertRaises(exception.TriggerNotFound, - self._manager.register_operation, - 1, 1) - trigger_id = 'register' - self._add_a_trigger(trigger_id) - op_id = 1 - self._manager.register_operation(trigger_id, op_id, resume=1) - register.assert_called_once_with(op_id, resume=1) - resume.assert_called_once_with(op_id, resume=1) - - @mock.patch.object(FakeTrigger, 'unregister_operation') - @mock.patch.object(FakeExecutor, 'cancel_operation') - def test_unregister_operation(self, cancel, unregister): - self.assertRaises(exception.TriggerNotFound, - self._manager.unregister_operation, - 1, 1) - trigger_id = 'unregister' - self._add_a_trigger(trigger_id) - op_id = 1 - self._manager.unregister_operation(trigger_id, op_id) - unregister.assert_called_once_with(op_id) - cancel.assert_called_once_with(op_id) - - def _add_a_trigger(self, trigger_id): - self._manager.add_trigger(trigger_id, self._trigger_type, {}) diff --git a/karbor/tests/unit/operationengine/engine/triggers/timetrigger/__init__.py b/karbor/tests/unit/operationengine/engine/triggers/timetrigger/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/operationengine/engine/triggers/timetrigger/test_time_trigger.py b/karbor/tests/unit/operationengine/engine/triggers/timetrigger/test_time_trigger.py deleted file mode 100644 index 2fd27178..00000000 --- a/karbor/tests/unit/operationengine/engine/triggers/timetrigger/test_time_trigger.py +++ /dev/null @@ -1,261 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from datetime import datetime -from datetime import timedelta -import eventlet -from unittest import mock - -from oslo_config import cfg - -from karbor import exception -from karbor.services.operationengine.engine.triggers.timetrigger.time_trigger \ - import TimeTrigger -from karbor.services.operationengine.engine.triggers.timetrigger import utils -from karbor.tests import base - - -class FakeTimeFormat(object): - def __init__(self, start_time, pattern): - super(FakeTimeFormat, self).__init__() - - @classmethod - def check_time_format(cls, pattern): - pass - - def compute_next_time(self, current_time): - return current_time + timedelta(seconds=0.5) - - def get_min_interval(self): - return cfg.CONF.min_interval - - -class FakeExecutor(object): - def __init__(self): - super(FakeExecutor, self).__init__() - self._ops = {} - - def execute_operation(self, operation_id, triggered_time, - expect_start_time, window): - if operation_id not in self._ops: - self._ops[operation_id] = 0 - self._ops[operation_id] += 1 - eventlet.sleep(0.5) - - def clear(self): - self._ops.clear() - - -class TimeTriggerTestCase(base.TestCase): - - def setUp(self): - super(TimeTriggerTestCase, self).setUp() - - self._set_configuration() - - mock_obj = mock.Mock() - mock_obj.return_value = FakeTimeFormat - utils.get_time_format_class = mock_obj - - self._default_executor = FakeExecutor() - - def test_check_configuration(self): - self._set_configuration(10, 20, 30) - self.assertRaisesRegex(exception.InvalidInput, - "Configurations of time trigger are invalid", - TimeTrigger.check_configuration) - self._set_configuration() - - def test_check_trigger_property_start_time(self): - trigger_property = { - "pattern": "", - "start_time": "" - } - - self.assertRaisesRegex(exception.InvalidInput, - "The trigger\'s start time is unknown", - TimeTrigger.check_trigger_definition, - trigger_property) - - trigger_property['start_time'] = 'abc' - self.assertRaisesRegex(exception.InvalidInput, - "The format of trigger .* is not correct", - TimeTrigger.check_trigger_definition, - trigger_property) - - trigger_property['start_time'] = 123 - self.assertRaisesRegex(exception.InvalidInput, - "The trigger .* is not an instance of string", - TimeTrigger.check_trigger_definition, - trigger_property) - - @mock.patch.object(FakeTimeFormat, 'get_min_interval') - def test_check_trigger_property_interval(self, get_min_interval): - get_min_interval.return_value = 0 - - trigger_property = { - "start_time": '2016-8-18 01:03:04' - } - - self.assertRaisesRegex(exception.InvalidInput, - "The interval of two adjacent time points .*", - TimeTrigger.check_trigger_definition, - trigger_property) - - def test_check_trigger_property_window(self): - trigger_property = { - "window": "abc", - "start_time": '2016-8-18 01:03:04' - } - - self.assertRaisesRegex(exception.InvalidInput, - "The trigger window.* is not integer", - TimeTrigger.check_trigger_definition, - trigger_property) - - trigger_property['window'] = 1000 - self.assertRaisesRegex(exception.InvalidInput, - "The trigger windows .* must be between .*", - TimeTrigger.check_trigger_definition, - trigger_property) - - def test_check_trigger_property_end_time(self): - trigger_property = { - "window": 15, - "start_time": '2016-8-18 01:03:04', - "end_time": "abc" - } - - self.assertRaisesRegex(exception.InvalidInput, - "The format of trigger .* is not correct", - TimeTrigger.check_trigger_definition, - trigger_property) - - def test_check_trigger_property_start_bigger_than_end_time(self): - trigger_property = { - 
"window": 15, - "start_time": '2016-08-18 01:03:04', - "end_time": "2016-08-17 01:03:04" - } - self.assertRaisesRegex(exception.InvalidInput, - "The trigger's start time.* is bigger " - "than end time.*", - TimeTrigger.check_trigger_definition, - trigger_property) - - def test_register_operation(self): - trigger = self._generate_trigger() - - operation_id = "1" - trigger.register_operation(operation_id) - eventlet.sleep(0.3) - - self.assertGreaterEqual(trigger._executor._ops[operation_id], 1) - self.assertRaisesRegex(exception.ScheduledOperationExist, - "The operation_id.* is exist", - trigger.register_operation, - operation_id) - - eventlet.sleep(0.3) - self.assertRaises(exception.TriggerIsInvalid, - trigger.register_operation, - "2") - - def test_unregister_operation(self): - trigger = self._generate_trigger() - operation_id = "2" - - trigger.register_operation(operation_id) - self.assertIn(operation_id, trigger._operation_ids) - - trigger.unregister_operation(operation_id) - self.assertNotIn(operation_id, trigger._operation_ids) - - def test_unregister_operation_when_scheduling(self): - trigger = self._generate_trigger() - - for op_id in ['1', '2', '3']: - trigger.register_operation(op_id) - self.assertIn(op_id, trigger._operation_ids) - eventlet.sleep(0.5) - - for op_id in ['2', '3']: - trigger.unregister_operation(op_id) - self.assertNotIn(op_id, trigger._operation_ids) - eventlet.sleep(0.6) - - self.assertGreaterEqual(trigger._executor._ops['1'], 1) - - self.assertTrue(('2' not in trigger._executor._ops) or ( - '3' not in trigger._executor._ops)) - - def test_update_trigger_property(self): - trigger = self._generate_trigger() - - trigger_property = { - "pattern": "", - "window": 15, - "start_time": '2016-8-18 01:03:04', - "end_time": datetime.utcnow() - } - - self.assertRaisesRegex(exception.InvalidInput, - ".*Can not find the first run time", - trigger.update_trigger_property, - trigger_property) - - trigger.register_operation('1') - eventlet.sleep(0.2) - trigger_property['end_time'] = ( - datetime.utcnow() + timedelta(seconds=1)) - self.assertRaisesRegex(exception.InvalidInput, - ".*First run time.* must be after.*", - trigger.update_trigger_property, - trigger_property) - - def test_update_trigger_property_success(self): - trigger = self._generate_trigger() - trigger.register_operation('1') - eventlet.sleep(0.2) - - trigger_property = { - "pattern": "", - "window": 15, - "start_time": datetime.utcnow(), - "end_time": '' - } - with mock.patch.object(FakeTimeFormat, 'compute_next_time') as c: - c.return_value = datetime.utcnow() + timedelta(seconds=20) - old_id = id(trigger._greenthread) - - trigger.update_trigger_property(trigger_property) - - self.assertNotEqual(old_id, id(trigger._greenthread)) - - def _generate_trigger(self, end_time=None): - if not end_time: - end_time = datetime.utcnow() + timedelta(seconds=1) - - trigger_property = { - "pattern": "", - "window": 15, - "start_time": datetime.utcnow(), - "end_time": end_time - } - - self._default_executor.clear() - return TimeTrigger("123", trigger_property, self._default_executor) - - def _set_configuration(self, min_window=15, - max_window=30, min_interval=60): - self.override_config('min_interval', min_interval) - self.override_config('min_window_time', min_window) - self.override_config('max_window_time', max_window) diff --git a/karbor/tests/unit/operationengine/engine/triggers/timetrigger/test_time_trigger_multi_node.py b/karbor/tests/unit/operationengine/engine/triggers/timetrigger/test_time_trigger_multi_node.py deleted 
file mode 100644 index 8e332c6d..00000000 --- a/karbor/tests/unit/operationengine/engine/triggers/timetrigger/test_time_trigger_multi_node.py +++ /dev/null @@ -1,282 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from collections import namedtuple -from datetime import datetime -from datetime import timedelta -import eventlet -import functools -import heapq -from unittest import mock - -from oslo_config import cfg -from oslo_utils import uuidutils - -from karbor import context as karbor_context -from karbor import exception -from karbor.services.operationengine.engine.triggers.timetrigger import \ - time_trigger_multi_node as tt -from karbor.services.operationengine.engine.triggers.timetrigger import utils -from karbor.tests import base - - -TriggerExecution = namedtuple('TriggerExecution', - ['execution_time', 'id', 'trigger_id']) - - -class FakeTimeFormat(object): - def __init__(self, start_time, pattern): - super(FakeTimeFormat, self).__init__() - - @classmethod - def check_time_format(cls, pattern): - pass - - def compute_next_time(self, current_time): - return current_time + timedelta(seconds=0.5) - - def get_min_interval(self): - return cfg.CONF.min_interval - - -class FakeExecutor(object): - def __init__(self): - super(FakeExecutor, self).__init__() - self._ops = {} - - def execute_operation(self, operation_id, triggered_time, - expect_start_time, window): - if operation_id not in self._ops: - self._ops[operation_id] = 0 - self._ops[operation_id] += 1 - eventlet.sleep(0.5) - - -class FakeTimeTrigger(object): - @classmethod - def get_time_format(cls, *args, **kwargs): - return FakeTimeFormat - - -class FakeDb(object): - def __init__(self): - self._db = [] - - def trigger_execution_get_next(self, context): - if len(self._db) == 0: - return None - return self._db[0] - - def trigger_execution_create(self, context, trigger_id, time): - element = TriggerExecution(time, uuidutils.generate_uuid(), trigger_id) - heapq.heappush(self._db, element) - - def trigger_execution_update(self, context, id, current_time, new_time): - for idx, element in enumerate(self._db): - if element.id == id: - if element.execution_time != current_time: - return False - self._db[idx] = TriggerExecution(new_time, element.id, - element.trigger_id) - break - heapq.heapify(self._db) - return True - - def trigger_execution_delete(self, context, id, trigger_id): - removed_ids = [] - for idx, element in enumerate(self._db): - if (id and element.id == id) or (trigger_id and - element.trigger_id == trigger_id): - removed_ids.append(idx) - - for idx in reversed(removed_ids): - self._db.pop(idx) - heapq.heapify(self._db) - return len(removed_ids) - - -def time_trigger_test(func): - @functools.wraps(func) - @mock.patch.object(tt, 'db', FakeDb()) - @mock.patch.object(karbor_context, 'get_admin_context', lambda: None) - @mock.patch.object(utils, 'get_time_format_class', - FakeTimeTrigger.get_time_format) - def wrapper(*args, **kwargs): - return func(*args, **kwargs) - - return wrapper - - -class 
TimeTriggerTestCase(base.TestCase): - _tid = 0 - _default_executor = FakeExecutor() - - def setUp(self): - super(TimeTriggerTestCase, self).setUp() - self._set_configuration() - - def test_check_configuration(self): - self._set_configuration(10, 20, 30) - self.assertRaisesRegex(exception.InvalidInput, - "Configurations of time trigger are invalid", - tt.TimeTrigger.check_configuration) - self._set_configuration() - - @time_trigger_test - def test_check_trigger_property_start_time(self): - trigger_property = { - "pattern": "", - "start_time": "" - } - - self.assertRaisesRegex(exception.InvalidInput, - "The trigger\'s start time is unknown", - tt.TimeTrigger.check_trigger_definition, - trigger_property) - - trigger_property['start_time'] = 'abc' - self.assertRaisesRegex(exception.InvalidInput, - "The format of trigger .* is not correct", - tt.TimeTrigger.check_trigger_definition, - trigger_property) - - trigger_property['start_time'] = 123 - self.assertRaisesRegex(exception.InvalidInput, - "The trigger .* is not an instance of string", - tt.TimeTrigger.check_trigger_definition, - trigger_property) - - @mock.patch.object(FakeTimeFormat, 'get_min_interval') - @time_trigger_test - def test_check_trigger_property_interval(self, get_min_interval): - get_min_interval.return_value = 0 - - trigger_property = { - "start_time": '2016-8-18 01:03:04' - } - - self.assertRaisesRegex(exception.InvalidInput, - "The interval of two adjacent time points .*", - tt.TimeTrigger.check_trigger_definition, - trigger_property) - - @time_trigger_test - def test_check_trigger_property_window(self): - trigger_property = { - "window": "abc", - "start_time": '2016-8-18 01:03:04' - } - - self.assertRaisesRegex(exception.InvalidInput, - "The trigger window.* is not integer", - tt.TimeTrigger.check_trigger_definition, - trigger_property) - - trigger_property['window'] = 1000 - self.assertRaisesRegex(exception.InvalidInput, - "The trigger windows .* must be between .*", - tt.TimeTrigger.check_trigger_definition, - trigger_property) - - @time_trigger_test - def test_check_trigger_property_end_time(self): - trigger_property = { - "window": 15, - "start_time": '2016-8-18 01:03:04', - "end_time": "abc" - } - - self.assertRaisesRegex(exception.InvalidInput, - "The format of trigger .* is not correct", - tt.TimeTrigger.check_trigger_definition, - trigger_property) - - @time_trigger_test - def test_register_operation(self): - trigger = self._generate_trigger() - - operation_id = "1" - trigger.register_operation(operation_id) - eventlet.sleep(2) - - self.assertGreaterEqual(self._default_executor._ops[operation_id], 1) - self.assertRaisesRegex(exception.ScheduledOperationExist, - "The operation_id.* is exist", - trigger.register_operation, - operation_id) - - @time_trigger_test - def test_unregister_operation(self): - trigger = self._generate_trigger() - operation_id = "2" - - trigger.register_operation(operation_id) - self.assertIn(operation_id, trigger._operation_ids) - - trigger.unregister_operation(operation_id) - self.assertNotIn(operation_id, trigger._operation_ids) - - @time_trigger_test - def test_update_trigger_property(self): - trigger = self._generate_trigger() - - trigger_property = { - "pattern": "", - "window": 15, - "start_time": '2016-8-18 01:03:04', - "end_time": datetime.utcnow(), - } - - self.assertRaisesRegex(exception.InvalidInput, - ".*Can not find the first run time", - trigger.update_trigger_property, - trigger_property) - - @time_trigger_test - def test_update_trigger_property_success(self): - trigger = 
self._generate_trigger() - trigger.register_operation('7') - eventlet.sleep(0.2) - - trigger_property = { - "pattern": "", - "window": 15, - "start_time": datetime.utcnow(), - "end_time": '' - } - with mock.patch.object(FakeTimeFormat, 'compute_next_time') as c: - c.return_value = datetime.utcnow() + timedelta(seconds=20) - trigger.update_trigger_property(trigger_property) - - def _generate_trigger(self, end_time=None): - if not end_time: - end_time = datetime.utcnow() + timedelta(seconds=1) - - trigger_property = { - "pattern": "", - "window": 15, - "start_time": datetime.utcnow(), - "end_time": end_time - } - - return tt.TimeTrigger( - uuidutils.generate_uuid(), - trigger_property, - self._default_executor, - ) - - def _set_configuration(self, min_window=15, - max_window=30, min_interval=60, poll_interval=1): - self.override_config('min_interval', min_interval) - self.override_config('min_window_time', min_window) - self.override_config('max_window_time', max_window) - self.override_config('trigger_poll_interval', poll_interval) diff --git a/karbor/tests/unit/operationengine/engine/triggers/timetrigger/timeformats/__init__.py b/karbor/tests/unit/operationengine/engine/triggers/timetrigger/timeformats/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/operationengine/engine/triggers/timetrigger/timeformats/test_calendar_time.py b/karbor/tests/unit/operationengine/engine/triggers/timetrigger/timeformats/test_calendar_time.py deleted file mode 100644 index 8a51f144..00000000 --- a/karbor/tests/unit/operationengine/engine/triggers/timetrigger/timeformats/test_calendar_time.py +++ /dev/null @@ -1,185 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re - -from datetime import datetime -from oslo_serialization import jsonutils - -from karbor import exception -from karbor.services.operationengine.engine.triggers.timetrigger.timeformats \ - import calendar_time -from karbor.tests import base - - -class CalendarTimeTestCase(base.TestCase): - - def test_invalid_pattern(self): - patterns = [ - " ", - - "DTSTART:20070220T170000Z\n" - "RRULE:FREQ=WEEKLY;INTERVAL=1;BYHOUR=17;BYMINUTE=1\n", - - "BEGIN:VCALENDAR\n" - "BEGIN:VEVENT\n" - "DTSTART:20070220T170000\n" - "RRULE:FREQ=WEEKLY;INTERVAL=1;BYHOUR=17;BYMINUTE=6\n" - "END:VCALENDAR", - - "BEGIN:VCALENDAR\n" - "DTSTART:20070220T170000\n" - "RRULE:FREQ=WEEKLY;INTERVAL=1;BYHOUR=17;BYMINUTE=6\n" - "END:VEVENT\n" - "END:VCALENDAR", - ] - - regexp = re.compile("^The trigger pattern.* is invalid$", re.DOTALL) - for pattern in patterns: - self.assertRaisesRegex(exception.InvalidInput, - regexp, - calendar_time.ICal.check_time_format, - pattern) - - patterns = [ - "BEGIN:VCALENDAR\n" - "END:VCALENDAR", - - "BEGIN:VCALENDAR\n" - "DTSTART:20070220T170000\n" - "RRULE:FREQ=WEEKLY;INTERVAL=1;BYHOUR=17;BYMINUTE=6\n" - "END:VCALENDAR", - - "BEGIN:VCALENDAR\n" - "BEGIN:VTODO\n" - "END:VTODO\n" - "END:VCALENDAR", - ] - - regexp = re.compile("^The trigger pattern.* must include less than " - "one VEVENT component$", re.DOTALL) - for pattern in patterns: - self.assertRaisesRegex(exception.InvalidInput, - regexp, - calendar_time.ICal.check_time_format, - pattern) - - patterns = [ - "BEGIN:VEVENT\n" - "END:VEVENT", - - "BEGIN:VCALENDAR\n" - "BEGIN:VEVENT\n" - "END:VEVENT\n" - "END:VCALENDAR", - - "BEGIN:VEVENT\n" - "DTSTART:20070220T170000Z\n" - "END:VEVENT", - ] - - regexp = re.compile("^The first VEVENT component of trigger pattern.* " - "must include less than one RRULE property$", - re.DOTALL) - for pattern in patterns: - self.assertRaisesRegex(exception.InvalidInput, - regexp, - calendar_time.ICal.check_time_format, - pattern) - - def test_valid_pattern(self): - pattern = "BEGIN:VEVENT\nRRULE:FREQ=MINUTELY;INTERVAL=60;\nEND:VEVENT" - self.assertIsNone(calendar_time.ICal.check_time_format(pattern)) - - def test_escape_valid_pattern(self): - pattern0 = "BEGIN:VEVENT\\nRRULE:FREQ=HOURLY;INTERVAL=1;\\nEND:VEVENT" - self.assertIsNone(calendar_time.ICal.check_time_format(pattern0)) - - pattern1 = "BEGIN:VEVENT\nRRULE:FREQ=HOURLY;INTERVAL=1;\nEND:VEVENT" - properties = {"format": "calendar", - "pattern": pattern1} - body = {"trigger_info": {"name": "test", - "type": "time", - "properties": properties, - }} - request = jsonutils.dumps(body) - received = jsonutils.loads(request) - trigger_info = received["trigger_info"] - trigger_property = trigger_info.get("properties", None) - pattern_ = trigger_property.get("pattern", None) - - self.assertIsNone(calendar_time.ICal.check_time_format(pattern_)) - - def test_compute_next_time(self): - pattern = ( - "BEGIN:VEVENT\n" - "RRULE:FREQ=WEEKLY;INTERVAL=1;BYHOUR=17;BYMINUTE=1\n" - "END:VEVENT" - ) - dtstart = datetime(2016, 2, 20, 17, 0, 0) - time_obj = calendar_time.ICal(dtstart, pattern) - now = datetime(2016, 2, 20, 15, 11, 0) - time1 = time_obj.compute_next_time(now) - time2 = datetime(2016, 2, 20, 17, 1, 0) - self.assertEqual(time2, time1) - now = datetime(2016, 3, 20, 15, 11, 0) - time1 = time_obj.compute_next_time(now) - time2 = datetime(2016, 3, 26, 17, 1, 0) - self.assertEqual(time2, time1) - - pattern = ( - "BEGIN:VEVENT\n" - "RRULE:FREQ=WEEKLY;BYDAY=MO,WE,FR;BYHOUR=10;BYMINUTE=0\n" - "RRULE:FREQ=WEEKLY;BYDAY=TU,TH,SA;BYHOUR=20;BYMINUTE=0\n" - "END:VEVENT" - ) - 
dtstart = datetime(2016, 2, 20, 17, 0, 0) - time_obj = calendar_time.ICal(dtstart, pattern) - now = datetime(2016, 7, 31, 15, 11, 0) - time1 = time_obj.compute_next_time(now) - time2 = datetime(2016, 8, 1, 10, 0, 0) - self.assertEqual(time2, time1) - time1 = time_obj.compute_next_time(time2) - time2 = datetime(2016, 8, 2, 20, 0, 0) - self.assertEqual(time2, time1) - time1 = time_obj.compute_next_time(time2) - time2 = datetime(2016, 8, 3, 10, 0, 0) - self.assertEqual(time2, time1) - - def test_get_min_interval(self): - pattern = ( - "BEGIN:VEVENT\n" - "RRULE:FREQ=WEEKLY;INTERVAL=1;BYHOUR=17;BYMINUTE=1\n" - "END:VEVENT" - ) - dtstart = datetime(2016, 2, 20, 17, 0, 0) - time_obj = calendar_time.ICal(dtstart, pattern) - self.assertEqual(604800, time_obj.get_min_interval()) - - pattern = ( - "BEGIN:VEVENT\n" - "RRULE:FREQ=WEEKLY;COUNT=1\n" - "END:VEVENT" - ) - dtstart = datetime(2016, 2, 20, 17, 0, 0) - time_obj = calendar_time.ICal(dtstart, pattern) - self.assertIsNone(time_obj.get_min_interval()) - - def test_get_min_interval_when_interval_is_bigger_than_default_rate(self): - pattern = ( - "BEGIN:VEVENT\n" - "RRULE:FREQ=MINUTELY;INTERVAL=5;\n" - "END:VEVENT" - ) - dtstart = datetime(2016, 2, 20, 17, 0, 0) - time_obj = calendar_time.ICal(dtstart, pattern) - self.assertEqual(300, time_obj.get_min_interval()) diff --git a/karbor/tests/unit/operationengine/engine/triggers/timetrigger/timeformats/test_crontab_time.py b/karbor/tests/unit/operationengine/engine/triggers/timetrigger/timeformats/test_crontab_time.py deleted file mode 100644 index d701f286..00000000 --- a/karbor/tests/unit/operationengine/engine/triggers/timetrigger/timeformats/test_crontab_time.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from datetime import datetime -from datetime import timedelta - -from karbor import exception -from karbor.services.operationengine.engine.triggers.timetrigger.timeformats \ - import crontab_time -from karbor.tests import base - - -class CrontabTimeTestCase(base.TestCase): - - def setUp(self): - super(CrontabTimeTestCase, self).setUp() - - self._time_format = crontab_time.Crontab - - def test_none_pattern(self): - self.assertRaisesRegex(exception.InvalidInput, - "The trigger pattern is None", - self._time_format.check_time_format, - "") - - def test_invalid_pattern(self): - self.assertRaisesRegex(exception.InvalidInput, - "The trigger pattern.* is invalid", - self._time_format.check_time_format, - "*") - - def test_compute_next_time(self): - now = datetime(2016, 1, 20, 15, 11, 0, 0) - obj = self._time_format(now, "* * * * *") - time1 = obj.compute_next_time(now) - time2 = now + timedelta(minutes=1) - self.assertEqual(time2, time1) - - def test_get_interval(self): - obj = self._time_format(datetime.now(), "* * * * *") - self.assertEqual(60, obj.get_min_interval()) diff --git a/karbor/tests/unit/operationengine/operations/__init__.py b/karbor/tests/unit/operationengine/operations/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/operationengine/operations/test_protect_operation.py b/karbor/tests/unit/operationengine/operations/test_protect_operation.py deleted file mode 100644 index 997b01fa..00000000 --- a/karbor/tests/unit/operationengine/operations/test_protect_operation.py +++ /dev/null @@ -1,147 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from datetime import datetime -from unittest import mock - -from karbor.common import constants -from karbor import context -from karbor import exception -from karbor import objects -from karbor.services.operationengine.operations import base as base_operation -from karbor.services.operationengine.operations import protect_operation -from karbor.tests import base - - -class FakeUserTrustManager(object): - def add_operation(self, context, operation_id): - return "123" - - def delete_operation(self, context, operation_id): - pass - - def resume_operation(self, operation_id, user_id, project_id, trust_id): - pass - - -class FakeCheckPoint(object): - def create(self, provider_id, plan_id, extra_info): - return - - -class FakeKarborClient(object): - def __init__(self): - super(FakeKarborClient, self).__init__() - self._check_point = FakeCheckPoint() - - @property - def checkpoints(self): - return self._check_point - - -class ProtectOperationTestCase(base.TestCase): - """Test cases for ProtectOperation class.""" - - def setUp(self): - super(ProtectOperationTestCase, self).setUp() - self._user_trust_manager = FakeUserTrustManager() - self._operation = protect_operation.ProtectOperation( - self._user_trust_manager - ) - self._operation_db = self._create_operation() - self._fake_karbor_client = FakeKarborClient() - - def test_check_operation_definition(self): - self.assertRaises(exception.InvalidOperationDefinition, - self._operation.check_operation_definition, - {}) - - @mock.patch.object(base_operation.Operation, '_create_karbor_client') - def test_execute(self, client): - client.return_value = self._fake_karbor_client - now = datetime.utcnow() - param = { - 'operation_id': self._operation_db.id, - 'triggered_time': now, - 'expect_start_time': now, - 'window_time': 30, - 'run_type': constants.OPERATION_RUN_TYPE_EXECUTE, - 'user_id': self._operation_db.user_id, - 'project_id': self._operation_db.project_id - } - self._operation.run(self._operation_db.operation_definition, - param=param) - - logs = objects.ScheduledOperationLogList.get_by_filters( - context.get_admin_context(), - {'state': constants.OPERATION_EXE_STATE_SUCCESS, - 'operation_id': self._operation_db.id}, 1, - None, ['created_at'], ['desc']) - - self.assertIsNotNone(logs) - log = logs.objects[0] - self.assertEqual(now, log.triggered_time) - - @mock.patch.object(base_operation.Operation, '_create_karbor_client') - def test_resume(self, client): - log = self._create_operation_log(self._operation_db.id) - client.return_value = self._fake_karbor_client - now = datetime.utcnow() - param = { - 'operation_id': self._operation_db.id, - 'triggered_time': now, - 'expect_start_time': now, - 'window_time': 30, - 'run_type': constants.OPERATION_RUN_TYPE_RESUME, - 'user_id': self._operation_db.user_id, - 'project_id': self._operation_db.project_id - } - self._operation.run(self._operation_db.operation_definition, - param=param) - - logs = objects.ScheduledOperationLogList.get_by_filters( - context.get_admin_context(), - {'state': constants.OPERATION_EXE_STATE_SUCCESS, - 'operation_id': self._operation_db.id}, 1, - None, ['created_at'], ['desc']) - - self.assertIsNotNone(logs) - log1 = logs.objects[0] - self.assertEqual(log.id, log1.id) - - def _create_operation(self): - operation_info = { - 'name': 'protect vm', - 'description': 'protect vm resource', - 'operation_type': 'protect', - 'user_id': '123', - 'project_id': '123', - 'trigger_id': '123', - 'operation_definition': { - 'provider_id': '123', - 'plan_id': '123' - } - } - operation = 
objects.ScheduledOperation(context.get_admin_context(), - **operation_info) - operation.create() - return operation - - def _create_operation_log(self, operation_id): - log_info = { - 'operation_id': operation_id, - 'state': constants.OPERATION_EXE_STATE_IN_PROGRESS, - } - log = objects.ScheduledOperationLog(context.get_admin_context(), - **log_info) - log.create() - return log diff --git a/karbor/tests/unit/operationengine/operations/test_retention_operation.py b/karbor/tests/unit/operationengine/operations/test_retention_operation.py deleted file mode 100644 index b896ac31..00000000 --- a/karbor/tests/unit/operationengine/operations/test_retention_operation.py +++ /dev/null @@ -1,197 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from datetime import datetime -from datetime import timedelta -from unittest import mock - -from karbor.common import constants -from karbor import context -from karbor import exception -from karbor import objects -from karbor.services.operationengine.operations import base as base_operation -from karbor.services.operationengine.operations import retention_operation -from karbor.tests import base - - -class FakeUserTrustManager(object): - def add_operation(self, context, operation_id): - return "123" - - def delete_operation(self, context, operation_id): - pass - - def resume_operation(self, operation_id, user_id, project_id, trust_id): - pass - - -class FakeCheckPointInstance(object): - def __init__(self, id, created_at): - super(FakeCheckPointInstance, self).__init__() - self.id = id - self.created_at = created_at - self.status = 'available' - self.project_id = '123' - self.protection_plan = { - 'provider_id': '123', - 'id': '123', - 'resources': None, - 'name': 'protect vm resource' - } - - -class FakeCheckPoint(object): - - _checkpoints = [] - - def __init__(self): - super(FakeCheckPoint, self).__init__() - - def create_all_check_points(self): - now = datetime.utcnow() - d1 = now - timedelta(days=16) - d2 = now - timedelta(days=15) - d3 = now - timedelta(days=3) - self._checkpoints.insert( - 0, FakeCheckPointInstance("1", d1.strftime("%Y-%m-%d"))) - self._checkpoints.insert( - 0, FakeCheckPointInstance("2", d2.strftime("%Y-%m-%d"))) - self._checkpoints.insert( - 0, FakeCheckPointInstance("3", d3.strftime("%Y-%m-%d"))) - - def create(self, provider_id, plan_id, extra_info): - now = datetime.utcnow() - self._checkpoints.insert( - 0, FakeCheckPointInstance("4", now.strftime("%Y-%m-%d"))) - - def delete(self, provider_id, checkpoint_id): - self._checkpoints = [x for x in self._checkpoints if x.id != - checkpoint_id] - - def list(self, provider_id, search_opts=None, limit=None, sort=None): - return self._checkpoints - - -class FakeKarborClient(object): - def __init__(self): - super(FakeKarborClient, self).__init__() - self._check_point = FakeCheckPoint() - - @property - def checkpoints(self): - return self._check_point - - def create_all_check_points(self): - self._check_point.create_all_check_points() - - -class ProtectOperationTestCase(base.TestCase): - 
"""Test cases for ProtectOperation class.""" - - def setUp(self): - super(ProtectOperationTestCase, self).setUp() - self._user_trust_manager = FakeUserTrustManager() - self._operation = retention_operation.RetentionProtectOperation( - self._user_trust_manager - ) - self._operation_db = self._create_operation() - self._fake_karbor_client = FakeKarborClient() - - def test_check_operation_definition(self): - self.assertRaises(exception.InvalidOperationDefinition, - self._operation.check_operation_definition, - {}) - - @mock.patch.object(base_operation.Operation, '_create_karbor_client') - def test_execute(self, client): - client.return_value = self._fake_karbor_client - self._fake_karbor_client.create_all_check_points() - now = datetime.utcnow() - param = { - 'operation_id': self._operation_db.id, - 'triggered_time': now, - 'expect_start_time': now, - 'window_time': 30, - 'run_type': constants.OPERATION_RUN_TYPE_EXECUTE, - 'user_id': self._operation_db.user_id, - 'project_id': self._operation_db.project_id - } - self._operation.run(self._operation_db.operation_definition, - param=param) - - logs = objects.ScheduledOperationLogList.get_by_filters( - context.get_admin_context(), - {'state': constants.OPERATION_EXE_DURATION_STATE_SUCCESS, - 'operation_id': self._operation_db.id}, 1, - None, ['created_at'], ['desc']) - self.assertIsNotNone(logs) - log = logs.objects[0] - self.assertTrue(now, log.triggered_time) - checkpoints = self._fake_karbor_client.checkpoints.list("123") - self.assertEqual(2, len(checkpoints)) - - @mock.patch.object(base_operation.Operation, '_create_karbor_client') - def test_resume(self, client): - log = self._create_operation_log(self._operation_db.id) - client.return_value = self._fake_karbor_client - now = datetime.utcnow() - param = { - 'operation_id': self._operation_db.id, - 'triggered_time': now, - 'expect_start_time': now, - 'window_time': 30, - 'run_type': constants.OPERATION_RUN_TYPE_RESUME, - 'user_id': self._operation_db.user_id, - 'project_id': self._operation_db.project_id - } - self._operation.run(self._operation_db.operation_definition, - param=param) - - logs = objects.ScheduledOperationLogList.get_by_filters( - context.get_admin_context(), - {'state': constants.OPERATION_EXE_DURATION_STATE_SUCCESS, - 'operation_id': self._operation_db.id}, 1, - None, ['created_at'], ['desc']) - - self.assertIsNotNone(logs) - log1 = logs.objects[0] - self.assertTrue(log.id, log1.id) - - def _create_operation(self): - operation_info = { - 'name': 'protect vm', - 'description': 'protect vm resource', - 'operation_type': 'retention_protect', - 'user_id': '123', - 'project_id': '123', - 'trigger_id': '123', - 'operation_definition': { - 'max_backups': '3', - 'provider_id': '123', - 'plan_id': '123', - 'retention_duration': '14' - } - } - operation = objects.ScheduledOperation(context.get_admin_context(), - **operation_info) - operation.create() - return operation - - def _create_operation_log(self, operation_id): - log_info = { - 'operation_id': operation_id, - 'state': constants.OPERATION_EXE_STATE_IN_PROGRESS, - } - log = objects.ScheduledOperationLog(context.get_admin_context(), - **log_info) - log.create() - return log diff --git a/karbor/tests/unit/operationengine/test_karbor_client.py b/karbor/tests/unit/operationengine/test_karbor_client.py deleted file mode 100644 index 0cb4b686..00000000 --- a/karbor/tests/unit/operationengine/test_karbor_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this 
file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_config import cfg - -from karbor.common import karbor_keystone_plugin -from karbor import context -from karbor.services.operationengine import karbor_client -from karbor.tests import base - - -class KarborClientTest(base.TestCase): - - @mock.patch.object(karbor_keystone_plugin.KarborKeystonePlugin, - 'get_service_endpoint') - def test_create_client(self, get_service_endpoint): - ctx = context.get_admin_context() - ctx.project_id = '123' - - cfg.CONF.set_default('version', '1', 'karbor_client') - - karbor_url = "http://127.0.0.1:9090" - sc = karbor_client.create(ctx, endpoint=karbor_url) - self.assertEqual(karbor_url, sc.http_client.endpoint) - - karbor_url = "http://127.0.0.1:9090/$(project_id)s" - get_service_endpoint.return_value = karbor_url - endpoint = karbor_url.replace("$(project_id)s", ctx.project_id) - sc = karbor_client.create(ctx) - self.assertEqual(endpoint, sc.http_client.endpoint) diff --git a/karbor/tests/unit/operationengine/test_manager.py b/karbor/tests/unit/operationengine/test_manager.py deleted file mode 100644 index 2c62f508..00000000 --- a/karbor/tests/unit/operationengine/test_manager.py +++ /dev/null @@ -1,204 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from oslo_messaging.rpc import dispatcher as rpc_dispatcher - -from karbor.common import constants -from karbor import context -from karbor import exception -from karbor import objects -from karbor.services.operationengine import manager as service_manager -from karbor.tests import base - - -class FakeTriggerManager(object): - - def __init__(self): - super(FakeTriggerManager, self).__init__() - self._trigger = {} - - def register_operation(self, trigger_id, operation_id, **kwargs): - if trigger_id not in self._trigger: - self._trigger[trigger_id] = [] - - if operation_id in self._trigger[trigger_id]: - raise exception.ScheduledOperationExist(op_id=operation_id) - - self._trigger[trigger_id].append(operation_id) - - def unregister_operation(self, trigger_id, operation_id, **kwargs): - pass - - def check_trigger_definition(self, trigger_type, trigger_definition): - pass - - def add_trigger(self, trigger_id, trigger_type, trigger_property): - self._trigger[trigger_id] = [] - - -class FakeUserTrustManager(object): - def add_operation(self, context, operation_id): - return "123" - - def delete_operation(self, context, operation_id): - pass - - def resume_operation(self, operation_id, user_id, project_id, trust_id): - pass - - -class OperationEngineManagerTestCase(base.TestCase): - """Test cases for OperationEngineManager class.""" - - def setUp(self): - super(OperationEngineManagerTestCase, self).setUp() - - self.manager = service_manager.OperationEngineManager() - self.manager._service_id = 0 - self.manager._trigger_manager = FakeTriggerManager() - self.manager._user_trust_manager = FakeUserTrustManager() - - self.ctxt = context.get_admin_context() - self._trigger = self._create_one_trigger() - self._operation = self._create_scheduled_operation(self._trigger.id) - - def test_init_host(self): - trigger_id = self._trigger.id - operation_id = self._operation.id - - self._create_operation_state(operation_id) - - op = self._create_scheduled_operation(self._trigger.id, False) - self._create_operation_state(op.id) - - self.manager._restore() - - trigger_manager = self.manager._trigger_manager - self.assertIn(trigger_id, trigger_manager._trigger) - self.assertIn(operation_id, trigger_manager._trigger[trigger_id]) - self.assertNotIn(op.id, trigger_manager._trigger[trigger_id]) - - def test_create_operation(self): - op = self._create_scheduled_operation(self._trigger.id, False) - with mock.patch( - 'karbor.services.operationengine.operations.protect_operation.' 
-            'ProtectOperation.check_operation_definition'
-        ):
-            self.manager.create_scheduled_operation(
-                self.ctxt, op)
-
-        state_obj = objects.ScheduledOperationState.get_by_operation_id(
-            self.ctxt, op.id)
-
-        self.assertIsNotNone(state_obj)
-
-    def test_create_operation_invalid_operation_definition(self):
-        op = self._create_scheduled_operation(self._trigger.id, False)
-        self.assertRaises(
-            rpc_dispatcher.ExpectedException,
-            self.manager.create_scheduled_operation,
-            self.ctxt,
-            op,
-        )
-
-    def test_create_operation_invalid_operation_type(self):
-        op = self._create_scheduled_operation(self._trigger.id, False)
-        op.operation_type = "123"
-        self.assertRaises(
-            rpc_dispatcher.ExpectedException,
-            self.manager.create_scheduled_operation,
-            self.ctxt,
-            op,
-        )
-
-    def test_delete_operation_get_state_failed(self):
-        self.assertRaises(rpc_dispatcher.ExpectedException,
-                          self.manager.delete_scheduled_operation,
-                          self.ctxt, self._operation.id, 1)
-
-    def test_delete_operation(self):
-        state = self._create_operation_state(self._operation.id)
-
-        self.manager.delete_scheduled_operation(
-            self.ctxt, self._operation.id, 1)
-
-        state = objects.ScheduledOperationState.get_by_operation_id(
-            self.ctxt, self._operation.id)
-        self.assertEqual(constants.OPERATION_STATE_DELETED, state.state)
-
-    @mock.patch.object(FakeTriggerManager, 'unregister_operation')
-    def test_suspend_resume_operation(self, unregister):
-        op_id = 'suspend'
-        trigger_id = "trigger"
-
-        self.manager.resume_scheduled_operation(self.ctxt, op_id, trigger_id)
-        self.assertIn(op_id,
-                      self.manager._trigger_manager._trigger[trigger_id])
-
-        self.manager.resume_scheduled_operation(self.ctxt, op_id, trigger_id)
-        self.assertEqual(1, len(
-            self.manager._trigger_manager._trigger[trigger_id]))
-
-        # suspend
-        self.manager.suspend_scheduled_operation(self.ctxt, op_id, trigger_id)
-        unregister.assert_called_once_with(trigger_id, op_id)
-
-    @mock.patch.object(FakeTriggerManager, 'check_trigger_definition')
-    def test_verify_trigger(self, check_trigger_definition):
-        self.manager.verify_trigger(self.ctxt, self._trigger)
-        check_trigger_definition.assert_called_once_with(
-            self._trigger.type, self._trigger.properties)
-
-    def _create_one_trigger(self):
-        trigger_info = {
-            'project_id': "123",
-            "name": "123",
-            "type": "time",
-            "properties": {
-                "format": "crontab",
-                "pattern": "* * * * *"
-            },
-        }
-        trigger = objects.Trigger(self.ctxt, **trigger_info)
-        trigger.create()
-        return trigger
-
-    def _create_scheduled_operation(self, trigger_id, enabled=True):
-        operation_info = {
-            "name": "123",
-            'description': '123',
-            "operation_type": "protect",
-            'user_id': '123',
-            "project_id": "123",
-            "trigger_id": trigger_id,
-            "operation_definition": {
-                "plan_id": ""
-            },
-            "enabled": enabled
-        }
-        operation = objects.ScheduledOperation(self.ctxt, **operation_info)
-        operation.create()
-        return operation
-
-    def _create_operation_state(self, operation_id):
-        state_info = {
-            "operation_id": operation_id,
-            "service_id": self.manager._service_id,
-            'trust_id': '123',
-            "state": constants.OPERATION_STATE_REGISTERED
-        }
-        operation_state = objects.ScheduledOperationState(self.ctxt,
-                                                          **state_info)
-        operation_state.create()
-        return operation_state
diff --git a/karbor/tests/unit/operationengine/test_operation_manager.py b/karbor/tests/unit/operationengine/test_operation_manager.py
deleted file mode 100644
index 723c68b2..00000000
--- a/karbor/tests/unit/operationengine/test_operation_manager.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Licensed under the Apache
License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from karbor import exception -from karbor.services.operationengine import operation_manager -from karbor.services.operationengine import operations -from karbor.tests import base - - -class FakeUserTrustManager(object): - def add_operation(self, context, operation_id): - return "123" - - def delete_operation(self, context, operation_id): - pass - - def resume_operation(self, operation_id, user_id, project_id, trust_id): - pass - - -class FakeOperation(operations.base.Operation): - OPERATION_TYPE = 'fake' - - @classmethod - def check_operation_definition(cls, operation_definition): - pass - - @classmethod - def _execute(cls, operation_definition, param): - pass - - @classmethod - def _resume(cls, operation_definition, param, log_ref): - pass - - -class OperationManagerTestCase(base.TestCase): - """Test cases for OperationManager class.""" - - def setUp(self): - super(OperationManagerTestCase, self).setUp() - - self._operation_type = FakeOperation.OPERATION_TYPE - self._mock_operations = (FakeOperation, ) - with mock.patch( - 'karbor.services.operationengine.operations.all_operations' - ) as mock_all_ops: - mock_all_ops.return_value = self._mock_operations - self._user_trust_manager = FakeUserTrustManager() - self._op_manager = operation_manager.OperationManager( - self._user_trust_manager) - - @mock.patch.object(FakeOperation, 'check_operation_definition') - def test_check_operation_definition(self, mock_check): - self._op_manager.check_operation_definition(self._operation_type, {}) - mock_check.assert_called_once_with({}) - - @mock.patch.object(operations.base.Operation, 'run') - def test_run_operation(self, mock_run): - self._op_manager.run_operation(self._operation_type, {}) - mock_run.assert_called_once_with({}) - - def test_invalid_operation_type(self): - self.assertRaisesRegex(exception.InvalidInput, - 'Invalid operation type:', - self._op_manager.check_operation_definition, - "123", {}) - - self.assertRaisesRegex(exception.InvalidInput, - 'Invalid operation type:', - self._op_manager.run_operation, - "123", {}) diff --git a/karbor/tests/unit/operationengine/test_user_trust_manager.py b/karbor/tests/unit/operationengine/test_user_trust_manager.py deleted file mode 100644 index 3ce045f1..00000000 --- a/karbor/tests/unit/operationengine/test_user_trust_manager.py +++ /dev/null @@ -1,100 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from karbor import context -from karbor.services.operationengine import user_trust_manager -from karbor.tests import base - - -G_TOKEN_ID = 'abcdefg' -G_TRUST_ID = '1234556' - - -class FakeSession(object): - def get_token(self): - return G_TOKEN_ID - - -class FakeSKP(object): - def create_trust_to_karbor(self, context): - return G_TRUST_ID - - def delete_trust_to_karbor(self, trust_id): - return - - def create_trust_session(self, trust_id): - return FakeSession() - - -class UserTrustManagerTestCase(base.TestCase): - """Test cases for UserTrustManager class.""" - - def setUp(self): - super(UserTrustManagerTestCase, self).setUp() - - self._user_id = '123' - self._project_id = '456' - self._ctx = context.RequestContext(user_id=self._user_id, - project_id=self._project_id) - self._manager = user_trust_manager.UserTrustManager() - self._manager._skp = FakeSKP() - - def test_add_operation(self): - manager = self._manager - operation_id = 'abc' - self.assertEqual(G_TRUST_ID, manager.add_operation( - self._ctx, operation_id)) - - info = manager._get_user_trust_info(self._user_id, self._project_id) - self.assertIn(operation_id, info['operation_ids']) - - manager.add_operation(self._ctx, operation_id) - self.assertEqual(1, len(info['operation_ids'])) - - @mock.patch.object(FakeSKP, 'delete_trust_to_karbor') - def test_delete_operation(self, del_trust): - manager = self._manager - op_ids = ['abc', '123'] - for op_id in op_ids: - manager.add_operation(self._ctx, op_id) - - info = manager._get_user_trust_info(self._user_id, self._project_id) - self.assertEqual(2, len(info['operation_ids'])) - - manager.delete_operation(self._ctx, op_ids[0]) - self.assertEqual(1, len(info['operation_ids'])) - - manager.delete_operation(self._ctx, op_ids[1]) - self.assertEqual(0, len(info['operation_ids'])) - del_trust.assert_called_once_with(G_TRUST_ID) - - def test_resume_operation(self): - manager = self._manager - operation_id = 'abc' - manager.resume_operation(operation_id, self._user_id, - self._project_id, G_TRUST_ID) - - info = manager._get_user_trust_info(self._user_id, self._project_id) - self.assertIn(operation_id, info['operation_ids']) - - manager.resume_operation(operation_id, self._user_id, - self._project_id, G_TRUST_ID) - self.assertEqual(1, len(info['operation_ids'])) - - def test_get_token(self): - manager = self._manager - manager.add_operation(self._ctx, 'abc') - - self.assertEqual(G_TOKEN_ID, manager.get_token( - self._user_id, self._project_id)) diff --git a/karbor/tests/unit/plugins/__init__.py b/karbor/tests/unit/plugins/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/tests/unit/plugins/test_database_protectable_plugin.py b/karbor/tests/unit/plugins/test_database_protectable_plugin.py deleted file mode 100644 index 6d4ef2cf..00000000 --- a/karbor/tests/unit/plugins/test_database_protectable_plugin.py +++ /dev/null @@ -1,108 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -from unittest import mock - -from oslo_config import cfg -from troveclient.v1 import instances - -from karbor.context import RequestContext -from karbor.resource import Resource -# Need to register trove_client -from karbor.services.protection.clients import trove # noqa -from karbor.services.protection.protectable_plugins.database \ - import DatabaseInstanceProtectablePlugin -from karbor.tests import base - - -class DatabaseInstanceProtectablePluginTest(base.TestCase): - def setUp(self): - super(DatabaseInstanceProtectablePluginTest, self).setUp() - service_catalog = [ - {'type': 'database', - 'endpoints': [{'publicURL': 'http://127.0.0.1:8774/v2.1/abcd'}], - }, - ] - self._context = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh', - service_catalog=service_catalog) - - def test_create_client_by_endpoint(self): - cfg.CONF.set_default('trove_endpoint', - 'http://127.0.0.1:8774/v2.1', - 'trove_client') - plugin = DatabaseInstanceProtectablePlugin(self._context) - self.assertEqual( - 'http://127.0.0.1:8774/v2.1/abcd', - plugin._client(self._context).client.management_url) - - def test_create_client_by_catalog(self): - plugin = DatabaseInstanceProtectablePlugin(self._context) - - self.assertEqual( - 'http://127.0.0.1:8774/v2.1/abcd', - plugin._client(self._context).client.management_url) - - def test_get_resource_type(self): - plugin = DatabaseInstanceProtectablePlugin(self._context) - - self.assertEqual("OS::Trove::Instance", plugin.get_resource_type()) - - def test_get_parent_resource_types(self): - plugin = DatabaseInstanceProtectablePlugin(self._context) - self.assertEqual(("OS::Keystone::Project", ), - plugin.get_parent_resource_types()) - - @mock.patch.object(instances.Instances, 'list') - def test_list_resources(self, mock_instance_list): - plugin = DatabaseInstanceProtectablePlugin(self._context) - - instance_info = collections.namedtuple('instance_info', ['id', 'name', - 'status']) - mock_instance_list.return_value = [ - instance_info(id='123', name='name123', status='ACTIVE'), - instance_info(id='456', name='name456', status='ACTIVE')] - self.assertEqual([Resource('OS::Trove::Instance', '123', 'name123'), - Resource('OS::Trove::Instance', '456', 'name456')], - plugin.list_resources(self._context)) - - @mock.patch.object(instances.Instances, 'get') - def test_show_resource(self, mock_instance_get): - plugin = DatabaseInstanceProtectablePlugin(self._context) - - instance_info = collections.namedtuple( - 'instance_info', ['id', 'name', 'status']) - mock_instance_get.return_value = instance_info( - id='123', name='name123', status='ACTIVE') - self.assertEqual(Resource('OS::Trove::Instance', '123', 'name123'), - plugin.show_resource(self._context, '123')) - - @mock.patch.object(instances.Instances, 'list') - def test_get_dependent_resources(self, mock_instance_list): - plugin = DatabaseInstanceProtectablePlugin(self._context) - - instance_info = collections.namedtuple( - 'instance_info', ['id', 'name', 'status', 'project_id']) - project_info = collections.namedtuple( - 'project_info', ['id', 'name', 'status']) - mock_instance_list.return_value = [ - instance_info(id='123', name='name123', status='ACTIVE', - project_id='abcd'), - instance_info(id='456', name='name456', status='ACTIVE', - project_id='abcd')] - project = project_info(id='abcd', name='name456', status='available') - self.assertEqual([Resource('OS::Trove::Instance', '123', 'name123'), - Resource('OS::Trove::Instance', '456', 'name456')], - plugin.get_dependent_resources( - 
self._context, project)) diff --git a/karbor/tests/unit/plugins/test_image_protectable_plugin.py b/karbor/tests/unit/plugins/test_image_protectable_plugin.py deleted file mode 100644 index 5c45b0c3..00000000 --- a/karbor/tests/unit/plugins/test_image_protectable_plugin.py +++ /dev/null @@ -1,163 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from collections import namedtuple -from unittest import mock - -from glanceclient.v2 import images -from keystoneauth1 import session as keystone_session -from novaclient.v2 import servers -from oslo_config import cfg - -from karbor.common import constants -from karbor.context import RequestContext -from karbor import resource -from karbor.services.protection.protectable_plugins.image import \ - ImageProtectablePlugin -from karbor.tests import base - -CONF = cfg.CONF - -image_info = namedtuple('image_info', field_names=['id', 'owner', 'name', - 'status']) -server_info = namedtuple('server_info', field_names=['id', 'type', 'name', - 'image']) -project_info = namedtuple('project_info', field_names=['id', 'type', 'name']) - - -class ImageProtectablePluginTest(base.TestCase): - def setUp(self): - super(ImageProtectablePluginTest, self).setUp() - service_catalog = [{ - 'type': 'image', - 'endpoints': [{'publicURL': 'http://127.0.0.1:9292'}] - }, { - 'type': 'compute', - 'endpoints': [{'publicURL': 'http://127.0.0.1:8774/v2.1/abcd'}] - }] - self._context = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh', - service_catalog=service_catalog) - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' - '_generate_session') - def test_create_client_by_endpoint(self, mock_generate_session): - CONF.set_default('glance_endpoint', 'http://127.0.0.1:9292', - 'glance_client') - CONF.set_default('nova_endpoint', 'http://127.0.0.1:8774/v2.1', - 'nova_client') - plugin = ImageProtectablePlugin(self._context) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - self.assertEqual( - plugin._glance_client(self._context).http_client.endpoint_override, - 'http://127.0.0.1:9292') - self.assertEqual( - plugin._nova_client(self._context).client.endpoint_override, - 'http://127.0.0.1:8774/v2.1/abcd') - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' 
-                '_generate_session')
-    def test_create_client_by_catalog(self, mock_generate_session):
-        CONF.set_default('glance_catalog_info', 'image:glance:publicURL',
-                         'glance_client')
-        CONF.set_default('nova_catalog_info', 'compute:nova:publicURL',
-                         'nova_client')
-        plugin = ImageProtectablePlugin(self._context)
-        mock_generate_session.return_value = keystone_session.Session(
-            auth=None)
-        self.assertEqual(
-            plugin._glance_client(self._context).http_client.endpoint_override,
-            'http://127.0.0.1:9292')
-        self.assertEqual(
-            plugin._nova_client(self._context).client.endpoint_override,
-            'http://127.0.0.1:8774/v2.1/abcd')
-
-    def test_get_resource_type(self):
-        plugin = ImageProtectablePlugin(self._context)
-        self.assertEqual(
-            plugin.get_resource_type(),
-            constants.IMAGE_RESOURCE_TYPE)
-
-    def test_get_parent_resource_type(self):
-        plugin = ImageProtectablePlugin(self._context)
-        self.assertItemsEqual(
-            plugin.get_parent_resource_types(),
-            (constants.SERVER_RESOURCE_TYPE, constants.PROJECT_RESOURCE_TYPE))
-
-    @mock.patch.object(images.Controller, 'list')
-    def test_list_resources(self, mock_image_list):
-        plugin = ImageProtectablePlugin(self._context)
-        mock_image_list.return_value = [
-            image_info(id='123', name='name123', owner='abcd',
-                       status='active'),
-            image_info(id='456', name='name456', owner='efgh',
-                       status='active'),
-        ]
-        self.assertEqual(plugin.list_resources(self._context),
-                         [resource.Resource(type=constants.IMAGE_RESOURCE_TYPE,
-                                            id='123', name='name123'),
-                          resource.Resource(type=constants.IMAGE_RESOURCE_TYPE,
-                                            id='456', name='name456')
-                          ])
-
-    @mock.patch.object(images.Controller, 'get')
-    def test_show_resource(self, mock_image_get):
-        image_info = namedtuple('image_info', field_names=['id', 'name',
-                                                           'status'])
-        plugin = ImageProtectablePlugin(self._context)
-        mock_image_get.return_value = image_info(id='123', name='name123',
-                                                 status='active')
-        self.assertEqual(plugin.show_resource(self._context, '123'),
-                         resource.Resource(type=constants.IMAGE_RESOURCE_TYPE,
-                                           id='123', name='name123'))
-
-    @mock.patch.object(images.Controller, 'get')
-    @mock.patch.object(servers.ServerManager, 'get')
-    @mock.patch('karbor.services.protection.client_factory.ClientFactory.'
- '_generate_session') - def test_get_server_dependent_resources(self, mock_generate_session, - mock_server_get, - mock_image_get): - vm = server_info(id='server1', - type=constants.SERVER_RESOURCE_TYPE, - name='nameserver1', - image=dict(id='123', name='name123')) - image = image_info(id='123', name='name123', owner='abcd', - status='active') - plugin = ImageProtectablePlugin(self._context) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - mock_server_get.return_value = vm - mock_image_get.return_value = image - self.assertEqual(plugin.get_dependent_resources(self._context, vm), - [resource.Resource( - type=constants.IMAGE_RESOURCE_TYPE, - id='123', - name='name123', - extra_info={'server_id': 'server1'})]) - - @mock.patch.object(images.Controller, 'list') - def test_get_project_dependent_resources(self, mock_image_list): - project = project_info(id='abcd', type=constants.PROJECT_RESOURCE_TYPE, - name='nameabcd') - plugin = ImageProtectablePlugin(self._context) - mock_image_list.return_value = [ - image_info('123', 'abcd', 'nameabcd', 'active'), - image_info('456', 'efgh', 'nameefgh', 'active'), - ] - self.assertEqual( - plugin.get_dependent_resources(self._context, project), - [resource.Resource(type=constants.IMAGE_RESOURCE_TYPE, - name='nameabcd', - id='123')]) diff --git a/karbor/tests/unit/plugins/test_network_protectable_plugin.py b/karbor/tests/unit/plugins/test_network_protectable_plugin.py deleted file mode 100644 index 7ad05e21..00000000 --- a/karbor/tests/unit/plugins/test_network_protectable_plugin.py +++ /dev/null @@ -1,160 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from collections import namedtuple -from unittest import mock - -from keystoneauth1 import session as keystone_session -from neutronclient.v2_0 import client -from oslo_config import cfg - -from karbor.common import constants -from karbor.context import RequestContext -from karbor import resource -from karbor.services.protection.protectable_plugins.network import \ - NetworkProtectablePlugin -from karbor.tests import base - -CONF = cfg.CONF - -server_info = namedtuple('server_info', - field_names=['id', 'type', 'name', 'addresses']) -project_info = namedtuple('project_info', field_names=['id', 'type']) - -FakePorts = {'ports': [ - {'fixed_ips': [{'subnet_id': 'subnet-1', - 'ip_address': '10.0.0.21'}], - 'id': 'port-1', - 'mac_address': 'mac_address_1', - 'device_id': 'vm_id_1', - 'name': '', - 'admin_state_up': True, - 'network_id': '658e1063-4ee3-4649-a2c9'}, - {'fixed_ips': [{'subnet_id': 'subnet-1', - 'ip_address': '10.0.0.22'}], - 'id': 'port-2', - 'mac_address': 'mac_address_2', - 'device_id': 'vm_id_2', - 'name': '', - 'admin_state_up': True, - 'network_id': 'network_id_2'} -]} - - -class NetworkProtectablePluginTest(base.TestCase): - def setUp(self): - super(NetworkProtectablePluginTest, self).setUp() - - service_catalog = [{ - 'type': 'network', - 'endpoints': [{'publicURL': 'http://127.0.0.1:9696'}] - }, { - 'type': 'compute', - 'endpoints': [{'publicURL': 'http://127.0.0.1:8774/v2.1/abcd'}] - }] - self._context = RequestContext(user_id='admin', - project_id='abcd', - auth_token='efgh', - service_catalog=service_catalog) - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' - '_generate_session') - def test_create_client_by_endpoint(self, mock_generate_session): - CONF.set_default('neutron_endpoint', 'http://127.0.0.1:9696', - 'neutron_client') - CONF.set_default('nova_endpoint', 'http://127.0.0.1:8774/v2.1', - 'nova_client') - plugin = NetworkProtectablePlugin(self._context) - neutronclient = plugin._neutron_client(self._context) - novaclient = plugin._nova_client(self._context) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - self.assertEqual('network', - neutronclient.httpclient.service_type) - self.assertEqual('http://127.0.0.1:9696', - neutronclient.httpclient.endpoint_url) - self.assertEqual('http://127.0.0.1:8774/v2.1/abcd', - novaclient.client.endpoint_override) - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' 
- '_generate_session') - def test_create_client_by_catalog(self, mock_generate_session): - CONF.set_default('neutron_catalog_info', 'network:neutron:publicURL', - 'neutron_client') - CONF.set_default('nova_catalog_info', 'compute:nova:publicURL', - 'nova_client') - plugin = NetworkProtectablePlugin(self._context) - neutronclient = plugin._neutron_client(self._context) - novaclient = plugin._nova_client(self._context) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - self.assertEqual('network', - neutronclient.httpclient.service_type) - self.assertEqual('http://127.0.0.1:9696', - neutronclient.httpclient.endpoint_url) - self.assertEqual('http://127.0.0.1:8774/v2.1/abcd', - novaclient.client.endpoint_override) - - def test_get_resource_type(self): - plugin = NetworkProtectablePlugin(self._context) - self.assertEqual(constants.NETWORK_RESOURCE_TYPE, - plugin.get_resource_type()) - - def test_get_parent_resource_type(self): - plugin = NetworkProtectablePlugin(self._context) - self.assertItemsEqual(plugin.get_parent_resource_types(), - (constants.PROJECT_RESOURCE_TYPE)) - - @mock.patch.object(client.Client, 'list_networks') - def test_list_resources(self, mock_client_list_networks): - plugin = NetworkProtectablePlugin(self._context) - - fake_network_info = {'networks': [ - {u'status': u'ACTIVE', - u'description': u'', - u'tenant_id': u'abcd', - u'name': u'private'}, - {u'status': u'ACTIVE', - u'description': u'', - u'name': u'ext_net', - u'tenant_id': u'abcd'} - ]} - - mock_client_list_networks.return_value = fake_network_info - self.assertEqual(plugin.list_resources(self._context), - [resource.Resource - (type=constants.NETWORK_RESOURCE_TYPE, - id='abcd', - name="Network Topology")]) - - @mock.patch.object(client.Client, 'list_networks') - def test_get_project_dependent_resources(self, mock_client_list_networks): - project = project_info(id='abcd', - type=constants.PROJECT_RESOURCE_TYPE) - plugin = NetworkProtectablePlugin(self._context) - fake_network_info = {'networks': [ - {u'status': u'ACTIVE', - u'description': u'', - u'tenant_id': u'abcd', - u'name': u'private'}, - {u'status': u'ACTIVE', - u'description': u'', - u'name': u'ext_net', - u'tenant_id': u'abcd'} - ]} - mock_client_list_networks.return_value = fake_network_info - self.assertEqual(plugin.get_dependent_resources(self._context, - project), - [resource.Resource - (type=constants.NETWORK_RESOURCE_TYPE, - id='abcd', - name="Network Topology")]) diff --git a/karbor/tests/unit/plugins/test_pod_protectable_plugin.py b/karbor/tests/unit/plugins/test_pod_protectable_plugin.py deleted file mode 100644 index 6fa9c5ad..00000000 --- a/karbor/tests/unit/plugins/test_pod_protectable_plugin.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock -import uuid - -from kubernetes.client.models.v1_object_meta import V1ObjectMeta -from kubernetes.client.models.v1_pod import V1Pod -from kubernetes.client.models.v1_pod_list import V1PodList -from kubernetes.client.models.v1_pod_status import V1PodStatus -from oslo_config import cfg - -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.clients import k8s # noqa -from karbor.services.protection.protectable_plugins.pod \ - import K8sPodProtectablePlugin -from karbor.tests import base - - -class PodProtectablePluginTest(base.TestCase): - def setUp(self): - super(PodProtectablePluginTest, self).setUp() - self._context = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh', - service_catalog=None) - - def test_get_resource_type(self): - plugin = K8sPodProtectablePlugin(self._context, cfg.CONF) - - self.assertEqual('OS::Kubernetes::Pod', plugin.get_resource_type()) - - def test_get_parent_resource_types(self): - plugin = K8sPodProtectablePlugin(self._context, cfg.CONF) - self.assertEqual(("OS::Keystone::Project"), - plugin.get_parent_resource_types()) - - @mock.patch('kubernetes.client.apis.core_v1_api.' - 'CoreV1Api.list_namespaced_pod') - def test_list_resources(self, mock_pod_list): - plugin = K8sPodProtectablePlugin(self._context, cfg.CONF) - - pod = V1Pod(api_version="v1", kind="Pod", - metadata=V1ObjectMeta( - name="busybox-test", - namespace="default", - uid="dd8236e1-8c6c-11e7-9b7a-fa163e18e097"), - status=V1PodStatus(phase="Running")) - pod_list = V1PodList(items=[pod]) - mock_pod_list.return_value = pod_list - self.assertEqual([ - Resource('OS::Kubernetes::Pod', - uuid.uuid5(uuid.NAMESPACE_OID, "default:busybox-test"), - 'default:busybox-test')], - plugin.list_resources(self._context)) - - @mock.patch('kubernetes.client.apis.core_v1_api.' - 'CoreV1Api.read_namespaced_pod') - def test_show_resource(self, mock_pod_get): - plugin = K8sPodProtectablePlugin(self._context, cfg.CONF) - - pod = V1Pod(api_version="v1", kind="Pod", - metadata=V1ObjectMeta( - name="busybox-test", - namespace="default", - uid="dd8236e1-8c6c-11e7-9b7a-fa163e18e097"), - status=V1PodStatus(phase="Running")) - mock_pod_get.return_value = pod - self.assertEqual(Resource( - 'OS::Kubernetes::Pod', - uuid.uuid5(uuid.NAMESPACE_OID, "default:busybox-test"), - 'default:busybox-test'), - plugin.show_resource(self._context, - uuid.uuid5(uuid.NAMESPACE_OID, - "default:busybox-test"), - {'name': 'default:busybox-test'}) - ) diff --git a/karbor/tests/unit/plugins/test_server_protectable_plugin.py b/karbor/tests/unit/plugins/test_server_protectable_plugin.py deleted file mode 100644 index 814013e2..00000000 --- a/karbor/tests/unit/plugins/test_server_protectable_plugin.py +++ /dev/null @@ -1,132 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -from unittest import mock - -from keystoneauth1 import session as keystone_session -from novaclient.v2 import servers -from oslo_config import cfg - -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.protectable_plugins.server \ - import ServerProtectablePlugin -from karbor.tests import base - - -class ServerProtectablePluginTest(base.TestCase): - def setUp(self): - super(ServerProtectablePluginTest, self).setUp() - service_catalog = [ - {'type': 'compute', - 'endpoints': [{'publicURL': 'http://127.0.0.1:8774/v2.1/abcd'}], - }, - ] - self._context = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh', - service_catalog=service_catalog) - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' - '_generate_session') - def test_create_client_by_endpoint(self, mock_generate_session): - cfg.CONF.set_default('nova_endpoint', - 'http://127.0.0.1:8774/v2.1', - 'nova_client') - plugin = ServerProtectablePlugin(self._context) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - self.assertEqual('compute', - plugin._client(self._context).client.service_type) - self.assertEqual( - 'http://127.0.0.1:8774/v2.1/abcd', - plugin._client(self._context).client.endpoint_override) - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' - '_generate_session') - def test_create_client_by_catalog(self, mock_generate_session): - plugin = ServerProtectablePlugin(self._context) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - self.assertEqual('compute', - plugin._client(self._context).client.service_type) - self.assertEqual( - 'http://127.0.0.1:8774/v2.1/abcd', - plugin._client(self._context).client.endpoint_override) - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' - '_generate_session') - def test_get_resource_type(self, mock_generate_session): - plugin = ServerProtectablePlugin(self._context) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - self.assertEqual("OS::Nova::Server", plugin.get_resource_type()) - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' - '_generate_session') - def test_get_parent_resource_types(self, mock_generate_session): - plugin = ServerProtectablePlugin(self._context) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - self.assertEqual(("OS::Keystone::Project", ), - plugin.get_parent_resource_types()) - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' - '_generate_session') - @mock.patch.object(servers.ServerManager, 'list') - def test_list_resources(self, mock_server_list, mock_generate_session): - plugin = ServerProtectablePlugin(self._context) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - - server_info = collections.namedtuple('server_info', ['id', 'name', - 'status']) - mock_server_list.return_value = [ - server_info(id='123', name='name123', status='ACTIVE'), - server_info(id='456', name='name456', status='ACTIVE')] - self.assertEqual([Resource('OS::Nova::Server', '123', 'name123'), - Resource('OS::Nova::Server', '456', 'name456')], - plugin.list_resources(self._context)) - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' 
- '_generate_session') - @mock.patch.object(servers.ServerManager, 'get') - def test_show_resource(self, mock_server_get, mock_generate_session): - plugin = ServerProtectablePlugin(self._context) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - - server_info = collections.namedtuple('server_info', ['id', 'name', - 'status']) - mock_server_get.return_value = server_info(id='123', name='name123', - status='ACTIVE') - self.assertEqual(Resource('OS::Nova::Server', '123', 'name123'), - plugin.show_resource(self._context, '123')) - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' - '_generate_session') - @mock.patch.object(servers.ServerManager, 'list') - def test_get_dependent_resources(self, mock_server_list, - mock_generate_session): - plugin = ServerProtectablePlugin(self._context) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - - server_info = collections.namedtuple('server_info', ['id', 'name', - 'status']) - mock_server_list.return_value = [ - server_info(id='123', name='name123', status='ACTIVE'), - server_info(id='456', name='name456', status='ACTIVE')] - self.assertEqual([Resource('OS::Nova::Server', '123', 'name123'), - Resource('OS::Nova::Server', '456', 'name456')], - plugin.get_dependent_resources(self._context, None)) diff --git a/karbor/tests/unit/plugins/test_share_protectable_plugin.py b/karbor/tests/unit/plugins/test_share_protectable_plugin.py deleted file mode 100644 index 32e4c5b5..00000000 --- a/karbor/tests/unit/plugins/test_share_protectable_plugin.py +++ /dev/null @@ -1,107 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -from unittest import mock - -from manilaclient.v2 import shares -from oslo_config import cfg - -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.protectable_plugins.share \ - import ShareProtectablePlugin -from karbor.tests import base - - -class ShareProtectablePluginTest(base.TestCase): - def setUp(self): - super(ShareProtectablePluginTest, self).setUp() - service_catalog = [ - {'type': 'sharev2', - 'endpoints': [{'publicURL': 'http://127.0.0.1:8774/v2.1/abcd'}], - }, - ] - self._context = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh', - service_catalog=service_catalog) - - def test_create_client_by_endpoint(self): - cfg.CONF.set_default('manila_endpoint', - 'http://127.0.0.1:8774/v2.1', - 'manila_client') - plugin = ShareProtectablePlugin(self._context) - - self.assertEqual( - 'http://127.0.0.1:8774/v2.1/abcd', - plugin._client(self._context).client.endpoint_url) - - def test_create_client_by_catalog(self): - plugin = ShareProtectablePlugin(self._context) - - self.assertEqual( - 'http://127.0.0.1:8774/v2.1/abcd', - plugin._client(self._context).client.endpoint_url) - - def test_get_resource_type(self): - plugin = ShareProtectablePlugin(self._context) - - self.assertEqual("OS::Manila::Share", plugin.get_resource_type()) - - def test_get_parent_resource_types(self): - plugin = ShareProtectablePlugin(self._context) - self.assertEqual(("OS::Keystone::Project", ), - plugin.get_parent_resource_types()) - - @mock.patch.object(shares.ShareManager, 'list') - def test_list_resources(self, mock_share_list): - plugin = ShareProtectablePlugin(self._context) - - share_info = collections.namedtuple('share_info', ['id', 'name', - 'status']) - mock_share_list.return_value = [ - share_info(id='123', name='name123', status='available'), - share_info(id='456', name='name456', status='available')] - self.assertEqual([Resource('OS::Manila::Share', '123', 'name123'), - Resource('OS::Manila::Share', '456', 'name456')], - plugin.list_resources(self._context)) - - @mock.patch.object(shares.ShareManager, 'get') - def test_show_resource(self, mock_share_get): - plugin = ShareProtectablePlugin(self._context) - - share_info = collections.namedtuple('share_info', ['id', 'name', - 'status']) - mock_share_get.return_value = share_info(id='123', name='name123', - status='available') - self.assertEqual(Resource('OS::Manila::Share', '123', 'name123'), - plugin.show_resource(self._context, '123')) - - @mock.patch.object(shares.ShareManager, 'list') - def test_get_dependent_resources(self, mock_share_list): - plugin = ShareProtectablePlugin(self._context) - - share_info = collections.namedtuple( - 'share_info', ['id', 'name', 'status', 'project_id']) - project_info = collections.namedtuple( - 'share_info', ['id', 'name', 'status']) - mock_share_list.return_value = [ - share_info(id='123', name='name123', status='available', - project_id='abcd'), - share_info(id='456', name='name456', status='available', - project_id='abcd')] - project = project_info(id='abcd', name='name456', status='available') - self.assertEqual([Resource('OS::Manila::Share', '123', 'name123'), - Resource('OS::Manila::Share', '456', 'name456')], - plugin.get_dependent_resources( - self._context, project)) diff --git a/karbor/tests/unit/plugins/test_volume_protectable_plugin.py b/karbor/tests/unit/plugins/test_volume_protectable_plugin.py deleted file mode 100644 index 3c948350..00000000 --- 
a/karbor/tests/unit/plugins/test_volume_protectable_plugin.py +++ /dev/null @@ -1,233 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from cinderclient.v3 import volumes -from collections import namedtuple - -from karbor.common import constants -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.protectable_plugins.volume \ - import VolumeProtectablePlugin - -from kubernetes.client.models.v1_cinder_volume_source \ - import V1CinderVolumeSource -from kubernetes.client.models.v1_object_meta import V1ObjectMeta -from kubernetes.client.models.v1_persistent_volume import V1PersistentVolume -from kubernetes.client.models.v1_persistent_volume_claim \ - import V1PersistentVolumeClaim -from kubernetes.client.models.v1_persistent_volume_claim_spec \ - import V1PersistentVolumeClaimSpec -from kubernetes.client.models.v1_persistent_volume_claim_status \ - import V1PersistentVolumeClaimStatus -from kubernetes.client.models.v1_persistent_volume_claim_volume_source \ - import V1PersistentVolumeClaimVolumeSource -from kubernetes.client.models.v1_persistent_volume_spec \ - import V1PersistentVolumeSpec - -from kubernetes.client.models.v1_pod import V1Pod -from kubernetes.client.models.v1_pod_spec import V1PodSpec -from kubernetes.client.models.v1_pod_status import V1PodStatus -from kubernetes.client.models.v1_volume import V1Volume - -from karbor.tests import base -from oslo_config import cfg - -project_info = namedtuple('project_info', field_names=['id', 'type', 'name']) -vol_info = namedtuple('vol_info', ['id', 'attachments', 'name', 'status', - 'availability_zone']) - - -class VolumeProtectablePluginTest(base.TestCase): - def setUp(self): - super(VolumeProtectablePluginTest, self).setUp() - service_catalog = [ - {'type': 'volumev3', - 'endpoints': [{'publicURL': 'http://127.0.0.1:8776/v3/abcd'}], - }, - ] - self._context = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh', - service_catalog=service_catalog) - - def test_create_client_by_endpoint(self): - cfg.CONF.set_default('cinder_endpoint', - 'http://127.0.0.1:8776/v3', - 'cinder_client') - plugin = VolumeProtectablePlugin(self._context) - self.assertEqual('volumev3', - plugin._client(self._context).client.service_type) - self.assertEqual('http://127.0.0.1:8776/v3/abcd', - plugin._client(self._context).client.management_url) - - def test_create_client_by_catalog(self): - plugin = VolumeProtectablePlugin(self._context) - self.assertEqual('volumev3', - plugin._client(self._context).client.service_type) - self.assertEqual('http://127.0.0.1:8776/v3/abcd', - plugin._client(self._context).client.management_url) - - def test_get_resource_type(self): - plugin = VolumeProtectablePlugin(self._context) - self.assertEqual("OS::Cinder::Volume", plugin.get_resource_type()) - - def test_get_parent_resource_types(self): - plugin = VolumeProtectablePlugin(self._context) - self.assertItemsEqual(("OS::Nova::Server", "OS::Kubernetes::Pod", - 
"OS::Keystone::Project"), - plugin.get_parent_resource_types()) - - @mock.patch.object(volumes.VolumeManager, 'list') - def test_list_resources(self, mock_volume_list): - plugin = VolumeProtectablePlugin(self._context) - mock_volume_list.return_value = [ - vol_info('123', [], 'name123', 'available', 'az1'), - vol_info('456', [], 'name456', 'available', 'az1'), - ] - self.assertEqual([Resource('OS::Cinder::Volume', '123', 'name123', - {'availability_zone': 'az1'}), - Resource('OS::Cinder::Volume', '456', 'name456', - {'availability_zone': 'az1'})], - plugin.list_resources(self._context)) - - @mock.patch.object(volumes.VolumeManager, 'get') - def test_show_resource(self, mock_volume_get): - plugin = VolumeProtectablePlugin(self._context) - - vol_info = namedtuple('vol_info', ['id', 'name', 'status', - 'availability_zone']) - mock_volume_get.return_value = vol_info(id='123', name='name123', - status='available', - availability_zone='az1') - self.assertEqual(Resource('OS::Cinder::Volume', '123', 'name123', - {'availability_zone': 'az1'}), - plugin.show_resource(self._context, "123")) - - @mock.patch.object(volumes.VolumeManager, 'list') - def test_get_server_dependent_resources(self, mock_volume_list): - plugin = VolumeProtectablePlugin(self._context) - - attached = [{'server_id': 'abcdef', 'name': 'name'}] - mock_volume_list.return_value = [ - vol_info('123', attached, 'name123', 'available', 'az1'), - vol_info('456', [], 'name456', 'available', 'az1'), - ] - self.assertEqual([Resource('OS::Cinder::Volume', '123', 'name123', - {'availability_zone': 'az1'})], - plugin.get_dependent_resources( - self._context, - Resource("OS::Nova::Server", 'abcdef', 'name', - {'availability_zone': 'az1'}))) - - @mock.patch.object(volumes.VolumeManager, 'list') - def test_get_project_dependent_resources(self, mock_volume_list): - project = project_info('abcd', constants.PROJECT_RESOURCE_TYPE, - 'nameabcd') - plugin = VolumeProtectablePlugin(self._context) - - volumes = [ - mock.Mock(name='Volume', id='123', availability_zone='az1'), - mock.Mock(name='Volume', id='456', availability_zone='az1'), - ] - setattr(volumes[0], 'os-vol-tenant-attr:tenant_id', 'abcd') - setattr(volumes[1], 'os-vol-tenant-attr:tenant_id', 'efgh') - setattr(volumes[0], 'name', 'name123') - setattr(volumes[1], 'name', 'name456') - - mock_volume_list.return_value = volumes - self.assertEqual( - [Resource('OS::Cinder::Volume', '123', 'name123', - {'availability_zone': 'az1'})], - plugin.get_dependent_resources(self._context, project)) - - @mock.patch.object(volumes.VolumeManager, 'list') - @mock.patch('kubernetes.client.apis.core_v1_api.' - 'CoreV1Api.read_persistent_volume') - @mock.patch('kubernetes.client.apis.core_v1_api.' - 'CoreV1Api.read_namespaced_persistent_volume_claim') - @mock.patch('kubernetes.client.apis.core_v1_api.' 
-                'CoreV1Api.read_namespaced_pod')
-    def test_get_pod_dependent_resources(self, mock_pod_read,
-                                         mock_pvc_read,
-                                         mock_pv_read,
-                                         mock_volume_list):
-        plugin = VolumeProtectablePlugin(self._context)
-
-        pod = V1Pod(api_version="v1", kind="Pod",
-                    metadata=V1ObjectMeta(
-                        name="busybox-test",
-                        namespace="default",
-                        uid="dd8236e1-8c6c-11e7-9b7a-fa163e18e097"),
-                    spec=V1PodSpec(
-                        volumes=[V1Volume(
-                            name="name123",
-                            persistent_volume_claim=(
-                                V1PersistentVolumeClaimVolumeSource(
-                                    claim_name="cinder-claim1")))],
-                        containers=[]),
-                    status=V1PodStatus(phase="Running"))
-
-        pvc = V1PersistentVolumeClaim(
-            api_version="v1",
-            kind="PersistentVolumeClaim",
-            metadata=V1ObjectMeta(
-                name="cinder-claim1",
-                namespace="default",
-                uid="fec036b7-9123-11e7-a930-fa163e18e097"),
-            spec=V1PersistentVolumeClaimSpec(
-                access_modes=["ReadWriteOnce"],
-                volume_name="pvc-fec036b7-9123-11e7-a930-fa163e18e097"),
-            status=V1PersistentVolumeClaimStatus(phase="Bound"))
-
-        pv = V1PersistentVolume(
-            api_version="v1",
-            kind="PersistentVolume",
-            metadata=V1ObjectMeta(
-                name="pvc-fec036b7-9123-11e7-a930-fa163e18e097",
-                namespace="None",
-                uid="ff43c217-9123-11e7-a930-fa163e18e097"),
-            spec=V1PersistentVolumeSpec(
-                cinder=V1CinderVolumeSource(
-                    fs_type=None,
-                    read_only=None,
-                    volume_id="7daedb1d-fc99-4a35-ab1b-b64971271d17"
-                )),
-            status=V1PersistentVolumeClaimStatus(phase="Bound"))
-
-        volumes = [
-            mock.Mock(name='Volume',
-                      id='7daedb1d-fc99-4a35-ab1b-b64971271d17',
-                      availability_zone='az1'),
-            mock.Mock(name='Volume',
-                      id='7daedb1d-fc99-4a35-ab1b-b64922441d17',
-                      availability_zone='az1'),
-        ]
-        setattr(volumes[0], 'name', 'name123')
-        setattr(volumes[1], 'name', 'name456')
-
-        mock_pod_read.return_value = pod
-        mock_pvc_read.return_value = pvc
-        mock_pv_read.return_value = pv
-        mock_volume_list.return_value = volumes
-        self.assertEqual(
-            [Resource('OS::Cinder::Volume',
-                      '7daedb1d-fc99-4a35-ab1b-b64971271d17',
-                      'name123',
-                      {'availability_zone': 'az1'})],
-            plugin.get_dependent_resources(
-                self._context,
-                Resource(id="c88b92a8-e8b4-504c-bad4-343d92061871",
-                         name="default:busybox-test",
-                         type="OS::Kubernetes::Pod")))
diff --git a/karbor/tests/unit/protection/__init__.py b/karbor/tests/unit/protection/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/karbor/tests/unit/protection/fake_clients.py b/karbor/tests/unit/protection/fake_clients.py
deleted file mode 100644
index c246a241..00000000
--- a/karbor/tests/unit/protection/fake_clients.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -from collections import namedtuple - -Stack = namedtuple('Stack', ['id', - 'stack_status', - 'stack_name']) - -FakeStacks = {} - - -class FakeHeatClient(object): - class Stacks(object): - def create(self, stack_name, template): - stack = Stack(id='fake_stack_id', - stack_name=stack_name, - stack_status='CREATE_IN_PROGRESS') - FakeStacks[stack.id] = stack - return { - 'stack': { - 'id': 'stack_id_1', - } - } - - def get(self, stack_id): - return FakeStacks[stack_id] - - def __init__(self): - super(FakeHeatClient, self).__init__() - self.stacks = self.Stacks() diff --git a/karbor/tests/unit/protection/fake_s3_client.py b/karbor/tests/unit/protection/fake_s3_client.py deleted file mode 100644 index 79fc3b38..00000000 --- a/karbor/tests/unit/protection/fake_s3_client.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from botocore.exceptions import ClientError - - -class FakeS3Client(object): - def __init__(self, *args, **kwargs): - super(FakeS3Client, self).__init__() - - @classmethod - def connection(cls, *args, **kargs): - return FakeS3Connection() - - -class FakeS3Connection(object): - def __init__(self, *args, **kwargs): - super(FakeS3Connection, self).__init__() - self.s3_dir = {} - self.object_headers = {} - - def create_bucket(self, Bucket): - self.s3_dir[Bucket] = { - 'Keys': {} - } - - def list_objects(self, Bucket, Prefix, Marker): - body = [] - prefix = '' if not Prefix else Prefix - for obj in self.s3_dir[Bucket]['Keys'].keys(): - if obj.startswith(prefix): - body.append({ - 'Key': obj - }) - if len(body) == 0: - return { - 'IsTruncated': False - } - else: - return { - 'Contents': body, - 'IsTruncated': False - } - - def put_object(self, Bucket, Key, Body, Metadata=None): - if Bucket in self.s3_dir.keys(): - self.s3_dir[Bucket]['Keys'][Key] = { - 'Body': FakeS3Stream(Body), - 'Metadata': Metadata if Metadata else {} - } - else: - raise ClientError("error_bucket") - - def get_object(self, Bucket, Key): - if Bucket in self.s3_dir.keys(): - if Key in self.s3_dir[Bucket]['Keys'].keys(): - return self.s3_dir[Bucket]['Keys'][Key] - else: - raise ClientError("error_object") - else: - raise ClientError("error_bucket") - - def delete_object(self, Bucket, Key): - if Bucket in self.s3_dir.keys(): - if Key in self.s3_dir[Bucket]['Keys'].keys(): - del self.s3_dir[Bucket]['Keys'][Key] - else: - raise ClientError("error_object") - else: - raise ClientError("error_bucket") - - -class FakeS3Stream(object): - def __init__(self, data): - self.data = data - - def read(self): - return self.data diff --git a/karbor/tests/unit/protection/fake_swift_client.py b/karbor/tests/unit/protection/fake_swift_client.py deleted file mode 100644 index eeb126ed..00000000 --- a/karbor/tests/unit/protection/fake_swift_client.py +++ /dev/null @@ -1,95 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import tempfile - -from swiftclient import ClientException - - -class FakeSwiftClient(object): - def __init__(self, *args, **kwargs): - super(FakeSwiftClient, self).__init__() - - @classmethod - def connection(cls, *args, **kargs): - return FakeSwiftConnection() - - -class FakeSwiftConnection(object): - def __init__(self, *args, **kwargs): - super(FakeSwiftConnection, self).__init__() - self.swiftdir = tempfile.mkdtemp() - self.object_headers = {} - - def put_container(self, container): - container_dir = self.swiftdir + "/" + container - if os.path.exists(container_dir) is True: - return - else: - os.makedirs(container_dir) - - def get_container(self, container, prefix, limit, marker, - end_marker, full_listing): - container_dir = self.swiftdir + "/" + container - body = [] - if prefix: - objects_dir = container_dir + "/" + prefix - else: - objects_dir = container_dir - for f in os.listdir(objects_dir): - if os.path.isfile(objects_dir + "/" + f): - body.append({"name": f}) - else: - body.append({"subdir": f}) - return None, body - - def put_object(self, container, obj, contents, headers=None): - container_dir = self.swiftdir + "/" + container - obj_file = container_dir + "/" + obj - obj_dir = obj_file[0:obj_file.rfind("/")] - if os.path.exists(container_dir) is True: - if os.path.exists(obj_dir) is False: - os.makedirs(obj_dir) - with open(obj_file, "w") as f: - f.write(contents) - - self.object_headers[obj_file] = {} - for key, value in headers.items(): - self.object_headers[obj_file][str(key)] = str(value) - return - else: - raise ClientException("error_container") - - def get_object(self, container, obj): - container_dir = self.swiftdir + "/" + container - obj_file = container_dir + "/" + obj - if os.path.exists(container_dir) is True: - if os.path.exists(obj_file) is True: - with open(obj_file, "r") as f: - return self.object_headers[obj_file], f.read() - else: - raise ClientException("error_obj") - else: - raise ClientException("error_container") - - def delete_object(self, container, obj): - container_dir = self.swiftdir + "/" + container - obj_file = container_dir + "/" + obj - if os.path.exists(container_dir) is True: - if os.path.exists(obj_file) is True: - os.remove(obj_file) - self.object_headers.pop(obj_file) - else: - raise ClientException("error_obj") - else: - raise ClientException("error_container") diff --git a/karbor/tests/unit/protection/fakes.py b/karbor/tests/unit/protection/fakes.py deleted file mode 100644 index 9b9b4868..00000000 --- a/karbor/tests/unit/protection/fakes.py +++ /dev/null @@ -1,369 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import futurist -from oslo_config import cfg -from oslo_log import log as logging -from taskflow import engines -from taskflow.patterns import graph_flow -from taskflow.patterns import linear_flow -from taskflow import task - -from karbor.resource import Resource -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection.graph import build_graph -from karbor.services.protection import protection_plugin -from karbor.services.protection import provider -from karbor.services.protection import resource_flow - -LOG = logging.getLogger(__name__) - -A = Resource(id='A', type='fake', name='fake') -B = Resource(id='B', type='fake', name='fake') -C = Resource(id='C', type='fake', name='fake') -D = Resource(id='D', type='fake', name='fake') -E = Resource(id='E', type='fake', name='fake') - - -resource_map = { - A: [C], - B: [C], - C: [D, E], - D: [], - E: [], -} - -resource_graph = build_graph([A, B, C, D], resource_map.__getitem__) - - -def fake_protection_plan(): - protection_plan = {'id': 'fake_id', - 'is_enabled': True, - 'name': 'fake_protection_plan', - 'comments': '', - 'revision': 0, - 'resources': [ - {"id": "A", "type": "fake", "name": "fake"}, - {"id": "B", "type": "fake", "name": "fake"}, - {"id": "C", "type": "fake", "name": "fake"}, - {"id": "D", "type": "fake", "name": "fake"}], - 'protection_provider': None, - 'parameters': {}, - 'provider_id': 'fake_id', - 'project_id': 'fake_project_id' - } - return protection_plan - - -plan_resources = [A, B, C, D] - - -class FakeBankPlugin(BankPlugin): - def __init__(self, config=None): - super(FakeBankPlugin, self).__init__(config=config) - self._objects = {} - fake_bank_opts = [ - cfg.HostAddressOpt('fake_host'), - ] - if config: - config.register_opts(fake_bank_opts, 'fake_bank') - self.fake_host = config['fake_bank']['fake_host'] - - def update_object(self, key, value, context=None): - self._objects[key] = value - - def get_object(self, key, context=None): - value = self._objects.get(key, None) - if value is None: - raise Exception - return value - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - objects_name = [] - if prefix is not None: - for key, value in self._objects.items(): - if key.find(prefix) == 0: - objects_name.append(key.lstrip(prefix)) - else: - objects_name = self._objects.keys() - return objects_name - - def delete_object(self, key, context=None): - self._objects.pop(key) - - def get_owner_id(self, context=None): - return - - -def fake_restore(): - restore = { - 'id': 'fake_id', - 'provider_id': 'fake_provider_id', - 'checkpoint_id': 'fake_checkpoint_id', - 'parameters': { - 'username': 'fake_username', - 'password': 'fake_password' - }, - 'restore_target': 'fake_target_url', - } - return restore - - -class FakeProtectablePlugin(object): - def get_resource_type(self): - pass - - def get_parent_resource_types(self): - pass - - def list_resources(self): - pass - - def get_dependent_resources(self, parent_resource): - pass - - -class MockOperation(protection_plugin.Operation): - def __init__(self): - super(MockOperation, self).__init__() - for hook_name in resource_flow.HOOKS: - setattr(self, hook_name, mock.Mock()) - - -class FakeOperation(protection_plugin.Operation): - def __init__(self): - 
super(FakeOperation, self).__init__() - self.all_invokes = {} - - def _update_invokes(self, resource, func, info): - self.all_invokes.setdefault(resource, {})[func] = info - - def on_prepare_begin(self, checkpoint, resource, context, parameters, - **kwargs): - info = { - 'checkpoint': checkpoint, - 'resource': resource, - 'context': context, - 'parameters': parameters, - 'kwargs': kwargs - } - self._update_invokes(resource, 'on_prepare_begin', info) - - def on_prepare_finish(self, checkpoint, resource, context, parameters, - **kwargs): - info = { - 'checkpoint': checkpoint, - 'resource': resource, - 'context': context, - 'parameters': parameters, - 'kwargs': kwargs - } - self._update_invokes(resource, 'on_prepare_finish', info) - - def on_main(self, checkpoint, resource, context, parameters, **kwargs): - info = { - 'checkpoint': checkpoint, - 'resource': resource, - 'context': context, - 'parameters': parameters, - 'kwargs': kwargs - } - self._update_invokes(resource, 'on_main', info) - - def on_complete(self, checkpoint, resource, context, parameters, **kwargs): - info = { - 'checkpoint': checkpoint, - 'resource': resource, - 'context': context, - 'parameters': parameters, - 'kwargs': kwargs - } - self._update_invokes(resource, 'on_complete', info) - - -class FakeProtectionPlugin(protection_plugin.ProtectionPlugin): - SUPPORTED_RESOURCES = [ - 'Test::ResourceA', - 'Test::ResourceB', - 'Test::ResourceC', - ] - - def __init__(self, config=None, *args, **kwargs): - super(FakeProtectionPlugin, self).__init__(config) - fake_plugin_opts = [ - cfg.StrOpt('fake_user'), - ] - if config: - config.register_opts(fake_plugin_opts, 'fake_plugin') - self.fake_user = config['fake_plugin']['fake_user'] - - def get_protect_operation(self, *args, **kwargs): - return MockOperation() - - def get_restore_operation(self, *args, **kwargs): - return MockOperation() - - def get_delete_operation(self, *args, **kwargs): - return MockOperation() - - def get_verify_operation(self, *args, **kwargs): - return MockOperation() - - @classmethod - def get_supported_resources_types(cls): - return cls.SUPPORTED_RESOURCES - - @classmethod - def get_options_schema(cls, resource_type): - return {} - - @classmethod - def get_saved_info_schema(cls, resource_type): - return {} - - @classmethod - def get_restore_schema(cls, resource_type): - return {} - - @classmethod - def get_saved_info(cls, metadata_store, resource): - return None - - -class FakeCheckpoint(object): - def __init__(self): - super(FakeCheckpoint, self).__init__() - self.id = 'fake_checkpoint' - self.status = 'available' - self.project_id = 'fake_project_id' - self.resource_graph = resource_graph - - def purge(self): - pass - - def commit(self): - pass - - def get_resource_bank_section(self, resource_id): - bank = Bank(FakeBankPlugin()) - return BankSection(bank, resource_id) - - def to_dict(self): - return { - "id": self.id, - "status": self.status, - "resource_graph": self.resource_graph, - "protection_plan": None, - "project_id": self.project_id - } - - -class FakeCheckpointCollection(object): - def create(self, plan, checkpoint_properties=None, - context=None): - return FakeCheckpoint() - - def get(self, checkpoint_id, - context=None): - return FakeCheckpoint() - - -class FakeProvider(provider.PluggableProtectionProvider): - def __init__(self): - self._id = 'test' - self._name = 'provider' - self._description = 'fake_provider' - self._extend_info_schema = {} - self._config = None - self._plugin_map = { - 'fake': FakeProtectionPlugin, - } - - def 
get_checkpoint_collection(self): - return FakeCheckpointCollection() - - -class FakeFlowEngine(object): - def create_task(self, function, requires=None, provides=None, - inject=None, **kwargs): - name = kwargs.get('name', None) - auto_extract = kwargs.get('auto_extract', True) - rebind = kwargs.get('rebind', None) - revert = kwargs.get('revert', None) - version = kwargs.get('version', None) - if function: - return task.FunctorTask(function, - name=name, - provides=provides, - requires=requires, - auto_extract=auto_extract, - rebind=rebind, - revert=revert, - version=version, - inject=inject) - - def add_tasks(self, flow, *nodes, **kwargs): - if flow is None: - LOG.error("The flow is None, get it first") - return - flow.add(*nodes, **kwargs) - - def link_task(self, flow, u, v): - flow.link(u, v) - - def build_flow(self, flow_name, flow_type='graph'): - if flow_type == 'linear': - return linear_flow.Flow(flow_name) - elif flow_type == 'graph': - return graph_flow.Flow(flow_name) - else: - LOG.error("unsupported flow type:%s", flow_type) - return - - def get_engine(self, flow, **kwargs): - if flow is None: - LOG.error("Flow is None, build it first") - return - executor = kwargs.get('executor', None) - engine = kwargs.get('engine', None) - store = kwargs.get('store', None) - if not executor: - executor = futurist.GreenThreadPoolExecutor() - if not engine: - engine = 'parallel' - flow_engine = engines.load(flow, - executor=executor, - engine=engine, - store=store) - return flow_engine - - def run_engine(self, flow_engine): - if flow_engine is None: - LOG.error("Flow engine is None,get it first") - return - flow_engine.run() - - def output(self, flow_engine, target=None): - if flow_engine is None: - LOG.error("Flow engine is None,return nothing") - return - if target: - return flow_engine.storage.fetch(target) - return flow_engine.storage.fetch_all() diff --git a/karbor/tests/unit/protection/test_bank.py b/karbor/tests/unit/protection/test_bank.py deleted file mode 100644 index 14b8283f..00000000 --- a/karbor/tests/unit/protection/test_bank.py +++ /dev/null @@ -1,263 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
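A minimal usage sketch for the FakeFlowEngine defined in fakes.py above, assuming two throwaway task functions: a producer task is wired to a consumer through taskflow's provides/requires storage, the flow is run, and the result is fetched back out. The explicit link_task call is optional here because the data dependency already orders the two tasks in a graph flow.

def produce():
    return 'payload'

def consume(data):
    return 'consumed %s' % data

engine = FakeFlowEngine()
flow = engine.build_flow('example_flow', flow_type='graph')
producer = engine.create_task(produce, provides='data')
consumer = engine.create_task(consume, requires=['data'], provides='result')
engine.add_tasks(flow, producer, consumer)
flow_engine = engine.get_engine(flow)
engine.run_engine(flow_engine)
# fetch a single named result out of taskflow storage
assert engine.output(flow_engine, target='result') == 'consumed payload'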
- -from collections import OrderedDict -from copy import deepcopy -from oslo_utils import uuidutils - -from karbor import exception -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection.bank_plugin import LeasePlugin -from karbor.tests import base - - -class _InMemoryBankPlugin(BankPlugin): - def __init__(self, config=None): - super(_InMemoryBankPlugin, self).__init__(config) - self._data = OrderedDict() - - def update_object(self, key, value, context=None): - self._data[key] = value - - def get_object(self, key, context=None): - try: - return deepcopy(self._data[key]) - except KeyError: - raise exception.BankGetObjectFailed('no such object') - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - marker_found = marker is None - for key in self._data.keys(): - if marker is not True and key != marker: - if marker_found: - if prefix is None or key.startswith(prefix): - if limit is not None: - limit -= 1 - if limit < 0: - return - yield key - else: - marker_found = True - - def delete_object(self, key, context=None): - del self._data[key] - - def get_owner_id(self): - return uuidutils.generate_uuid() - - -class _InMemoryLeasePlugin(LeasePlugin): - - def acquire_lease(self): - pass - - def renew_lease(self): - pass - - def check_lease_validity(self): - return True - - -class BankSectionTest(base.TestCase): - INVALID_PATHS = ( - '/', - '/a$', - '/path/', - '/path/path/', - 'space space', - '/path/../dots/', - '/,', - ) - - VALID_PATHS = ( - '/key', - '/top/key', - '/top/middle/bottom/key', - '/all_kinds/of-char.acters/@path1', - ) - - def _create_test_bank(self): - return Bank(_InMemoryBankPlugin()) - - def test_empty_key(self): - bank = self._create_test_bank() - section = BankSection(bank, "/prefix", is_writable=True) - self.assertRaises( - exception.InvalidParameterValue, - section.update_object, - "", - "value", - ) - self.assertRaises( - exception.InvalidParameterValue, - section.update_object, - None, - "value", - ) - - def test_update_invalid_object(self): - bank = self._create_test_bank() - for path in self.INVALID_PATHS: - self.assertRaises( - exception.InvalidParameterValue, - bank.update_object, - path, - "value", - ) - - def test_get_invalid_object(self): - bank = self._create_test_bank() - for path in self.INVALID_PATHS: - self.assertRaises( - exception.InvalidParameterValue, - bank.get_object, - path, - ) - - def test_valid_object(self): - value1 = 'value1' - value2 = 'value2' - bank = self._create_test_bank() - for path in self.VALID_PATHS: - bank.update_object(path, value1) - bank.update_object(path, value2) - res = bank.get_object(path) - self.assertEqual(value2, res) - bank.delete_object(path) - self.assertRaises( - exception.BankGetObjectFailed, - bank.get_object, - path, - ) - - def test_delete_object(self): - bank = self._create_test_bank() - section = BankSection(bank, "/prefix", is_writable=True) - bank.update_object("/prefix/a", "value") - bank.update_object("/prefix/b", "value") - bank.update_object("/prefix/c", "value") - section.delete_object("a") - section.delete_object("/b") - section.delete_object("//c") - - def test_list_objects(self): - bank = self._create_test_bank() - section = BankSection(bank, "/prefix", is_writable=True) - bank.update_object("/prefix/KeyA", "value") - bank.update_object("/prefix", "value") - bank.update_object("/prefixKeyD", "value") # 
Should not appear - section.update_object("/KeyB", "value") - section.update_object("KeyC", "value") - expected_result = ["KeyA", "KeyB", "KeyC"] - self.assertEqual(expected_result, list(section.list_objects("/"))) - self.assertEqual(expected_result, list(section.list_objects("///"))) - self.assertEqual(expected_result, list(section.list_objects(None))) - self.assertEqual(expected_result, list(section.list_objects("Key"))) - self.assertEqual( - expected_result[:2], - list(section.list_objects("/", limit=2))) - self.assertEqual( - expected_result[2:4], - list(section.list_objects("/", limit=2, marker="KeyB"))) - - def test_list_objects_with_extra_prefix_and_marker(self): - bank = self._create_test_bank() - section = BankSection(bank, "/prefix", is_writable=True) - section.update_object("prefix1/KeyA", "value") - section.update_object("prefix2/KeyB", "value") - section.update_object("prefix2/KeyC", "value") - expected_result = ["prefix2/KeyC"] - self.assertEqual( - expected_result, - list(section.list_objects('/prefix2/', marker="KeyB")) - ) - - def test_read_only(self): - bank = self._create_test_bank() - section = BankSection(bank, "/prefix", is_writable=False) - self.assertRaises( - exception.BankReadonlyViolation, - section.update_object, - "object", - "value", - ) - bank.update_object("/prefix/object", "value") - self.assertRaises( - exception.BankReadonlyViolation, - section.update_object, - "object", - "value", - ) - self.assertRaises( - exception.BankReadonlyViolation, - section.delete_object, - "object", - ) - - def test_double_dot_key(self): - bank = self._create_test_bank() - section = BankSection(bank, "/prefix") - self.assertRaises( - exception.InvalidParameterValue, - section.update_object, - "/../../", - "", - ) - - def test_double_dot_section_prefix(self): - bank = self._create_test_bank() - self.assertRaises( - exception.InvalidParameterValue, - BankSection, - bank, - '/../../', - ) - - def test_nested_sections_get(self): - bank = self._create_test_bank() - top_section = BankSection(bank, "/top") - mid_section = top_section.get_sub_section("/mid") - bottom_section = mid_section.get_sub_section("/bottom") - bottom_section.update_object("key", "value") - self.assertEqual("value", bank.get_object("/top/mid/bottom/key")) - self.assertEqual("value", bottom_section.get_object("key")) - - def test_nested_sections_list(self): - bank = self._create_test_bank() - top_section = BankSection(bank, "/top") - mid_section = top_section.get_sub_section("/mid") - bottom_section = mid_section.get_sub_section("/bottom") - keys = ["KeyA", "KeyB", "KeyC"] - for key in keys: - bottom_section.update_object(key, "value") - - list_result = set(bottom_section.list_objects(prefix="Key")) - self.assertEqual(set(keys), list_result) - self.assertEqual( - keys[:2], - list(bottom_section.list_objects("/", limit=2))) - self.assertEqual( - keys[2:4], - list(bottom_section.list_objects("/", limit=2, marker="KeyB"))) - - def test_nested_sections_read_only(self): - bank = self._create_test_bank() - section = BankSection(bank, "/top", is_writable=False) - self.assertRaises( - exception.BankReadonlyViolation, - section.get_sub_section, - "/mid", - is_writable=True, - ) diff --git a/karbor/tests/unit/protection/test_checkpoint.py b/karbor/tests/unit/protection/test_checkpoint.py deleted file mode 100644 index fea8ee02..00000000 --- a/karbor/tests/unit/protection/test_checkpoint.py +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in 
compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from karbor.resource import Resource -from karbor.services.protection import bank_plugin -from karbor.services.protection import checkpoint -from karbor.services.protection import graph - -from karbor.tests import base -from karbor.tests.unit.protection.fakes import fake_protection_plan -from karbor.tests.unit.protection.test_bank import _InMemoryBankPlugin -from karbor.tests.unit.protection.test_bank import _InMemoryLeasePlugin - -A = Resource(id="A", type="fake", name="fake") -B = Resource(id="B", type="fake", name="fake") -C = Resource(id="C", type="fake", name="fake") -D = Resource(id="D", type="fake", name="fake") -E = Resource(id="E", type="fake", name="fake") - -resource_map = { - A: [C], - B: [C], - C: [D, E], - D: [], - E: [], -} - - -class CheckpointTest(base.TestCase): - def test_create_in_section(self): - bank = bank_plugin.Bank(_InMemoryBankPlugin()) - bank_lease = _InMemoryLeasePlugin() - checkpoints_section = bank_plugin.BankSection(bank, "/checkpoints") - indices_section = bank_plugin.BankSection(bank, "/indices") - owner_id = bank.get_owner_id() - plan = fake_protection_plan() - cp = checkpoint.Checkpoint.create_in_section( - checkpoints_section=checkpoints_section, - indices_section=indices_section, - bank_lease=bank_lease, - owner_id=owner_id, - plan=plan) - checkpoint_data = cp._md_cache - self.assertEqual( - checkpoint_data, - bank._plugin.get_object( - "/checkpoints/%s/%s" % (checkpoint_data['id'], - checkpoint._INDEX_FILE_NAME) - ) - ) - self.assertEqual(owner_id, cp.owner_id) - self.assertEqual("protecting", cp.status) - - def test_resource_graph(self): - bank = bank_plugin.Bank(_InMemoryBankPlugin()) - bank_lease = _InMemoryLeasePlugin() - checkpoints_section = bank_plugin.BankSection(bank, "/checkpoints") - indices_section = bank_plugin.BankSection(bank, "/indices") - owner_id = bank.get_owner_id() - plan = fake_protection_plan() - cp = checkpoint.Checkpoint.create_in_section( - checkpoints_section=checkpoints_section, - indices_section=indices_section, - bank_lease=bank_lease, - owner_id=owner_id, - plan=plan) - - resource_graph = graph.build_graph([A, B, C, D], - resource_map.__getitem__) - cp.resource_graph = resource_graph - cp.commit() - checkpoint_data = cp._md_cache - self.assertEqual( - checkpoint_data, - bank._plugin.get_object( - "/checkpoints/%s/%s" % (checkpoint_data["id"], - checkpoint._INDEX_FILE_NAME) - ) - ) - self.assertEqual(len(resource_graph), len(cp.resource_graph)) - for start_node in resource_graph: - self.assertIn(start_node, cp.resource_graph) diff --git a/karbor/tests/unit/protection/test_checkpoint_collection.py b/karbor/tests/unit/protection/test_checkpoint_collection.py deleted file mode 100644 index 3ff7f47e..00000000 --- a/karbor/tests/unit/protection/test_checkpoint_collection.py +++ /dev/null @@ -1,231 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from datetime import datetime -from unittest import mock - -from oslo_utils import timeutils - -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.checkpoint import CheckpointCollection -from karbor.tests import base -from karbor.tests.unit.protection.fakes import fake_protection_plan -from karbor.tests.unit.protection.test_bank import _InMemoryBankPlugin -from karbor.tests.unit.protection.test_bank import _InMemoryLeasePlugin - - -class CheckpointCollectionTest(base.TestCase): - def _create_test_collection(self): - return CheckpointCollection(Bank(_InMemoryBankPlugin()), - _InMemoryLeasePlugin()) - - def test_create_checkpoint(self): - collection = self._create_test_collection() - checkpoint = collection.create(fake_protection_plan()) - checkpoint.status = "finished" - checkpoint.commit() - self.assertEqual( - checkpoint.status, - collection.get(checkpoint_id=checkpoint.id).status, - ) - - def test_list_checkpoints(self): - collection = self._create_test_collection() - plan = fake_protection_plan() - provider_id = plan['provider_id'] - project_id = plan['project_id'] - result = {collection.create(plan).id for i in range(10)} - self.assertEqual(set(collection.list_ids( - project_id=project_id, provider_id=provider_id)), result) - - def test_list_checkpoints_with_all_tenants(self): - collection = self._create_test_collection() - plan_1 = fake_protection_plan() - plan_1["id"] = "fake_plan_id_1" - plan_1["project_id"] = "fake_project_id_1" - provider_id_1 = plan_1['provider_id'] - checkpoints_plan_1 = {collection.create(plan_1).id for i in range(10)} - - plan_2 = fake_protection_plan() - plan_2["id"] = "fake_plan_id_2" - plan_2["project_id"] = "fake_project_id_2" - checkpoints_plan_2 = {collection.create(plan_2).id for i in range(10)} - checkpoints_plan_1.update(checkpoints_plan_2) - self.assertEqual(set(collection.list_ids( - project_id="fake_project_id_1", provider_id=provider_id_1, - all_tenants=True)), checkpoints_plan_1) - - def test_list_checkpoints_by_plan_id(self): - collection = self._create_test_collection() - plan_1 = fake_protection_plan() - plan_1["id"] = "fake_plan_id_1" - plan_1['provider_id'] = "fake_provider_id_1" - plan_1["project_id"] = "fake_project_id_1" - provider_id_1 = plan_1['provider_id'] - checkpoints_plan_1 = {collection.create(plan_1).id for i in range(10)} - - plan_2 = fake_protection_plan() - plan_2["id"] = "fake_plan_id_2" - plan_2['provider_id'] = "fake_provider_id_2" - plan_2["project_id"] = "fake_project_id_2" - provider_id_2 = plan_1['provider_id'] - checkpoints_plan_2 = {collection.create(plan_2).id for i in range(10)} - self.assertEqual(set(collection.list_ids( - project_id="fake_project_id_1", provider_id=provider_id_1, - plan_id="fake_plan_id_1")), checkpoints_plan_1) - self.assertEqual(set(collection.list_ids( - project_id="fake_project_id_2", provider_id=provider_id_2, - plan_id="fake_plan_id_2")), checkpoints_plan_2) - - def test_list_checkpoints_by_plan_with_all_tenants(self): - collection = self._create_test_collection() - plan_1 = fake_protection_plan() - plan_1["id"] = "fake_plan_id_1" - 
plan_1["project_id"] = "fake_project_id_1" - provider_id_1 = plan_1['provider_id'] - checkpoints_plan_1 = {collection.create(plan_1).id for i in range(10)} - plan_1["project_id"] = "fake_project_id_2" - checkpoints_plan_2 = {collection.create(plan_1).id for i in range(10)} - checkpoints_plan_2.update(checkpoints_plan_1) - self.assertEqual(set(collection.list_ids( - project_id="fake_project_id_1", provider_id=provider_id_1, - plan_id='fake_plan_id_1', - all_tenants=True)), checkpoints_plan_2) - - def test_list_checkpoints_by_plan_id_and_filter_by_start_date(self): - collection = self._create_test_collection() - date1 = datetime.strptime("2018-11-12", "%Y-%m-%d") - date2 = datetime.strptime("2018-11-13", "%Y-%m-%d") - timeutils.utcnow = mock.MagicMock() - timeutils.utcnow.return_value = date1 - plan = fake_protection_plan() - plan["id"] = "fake_plan_id" - plan['provider_id'] = "fake_provider_id" - plan["project_id"] = "fake_project_id" - provider_id = plan['provider_id'] - checkpoints_plan_date1 = { - collection.create(plan).id for i in range(10)} - timeutils.utcnow = mock.MagicMock() - timeutils.utcnow.return_value = date2 - checkpoints_plan_date2 = { - collection.create(plan).id for i in range(10)} - self.assertEqual(set(collection.list_ids( - project_id="fake_project_id", provider_id=provider_id, - plan_id="fake_plan_id", start_date=date1, end_date=date1)), - checkpoints_plan_date1) - self.assertEqual(set(collection.list_ids( - project_id="fake_project_id", provider_id=provider_id, - plan_id="fake_plan_id", start_date=date2)), - checkpoints_plan_date2) - - def test_list_checkpoints_by_plan_with_marker(self): - collection = self._create_test_collection() - plan = fake_protection_plan() - plan["id"] = "fake_plan_id" - plan['provider_id'] = "fake_provider_id" - plan["project_id"] = "fake_project_id" - provider_id = plan['provider_id'] - checkpoints_plan = {collection.create(plan, { - 'checkpoint_id': i}).id for i in range(10)} - checkpoints_sorted = sorted(checkpoints_plan) - self.assertEqual(len(collection.list_ids( - project_id="fake_project_id", provider_id=provider_id, - plan_id="fake_plan_id", marker=checkpoints_sorted[0])) < 10, True) - - def test_list_checkpoints_by_date(self): - collection = self._create_test_collection() - date1 = datetime.strptime("2016-06-12", "%Y-%m-%d") - timeutils.utcnow = mock.MagicMock() - timeutils.utcnow.return_value = date1 - plan = fake_protection_plan() - provider_id = plan['provider_id'] - project_id = plan['project_id'] - checkpoints_date_1 = {collection.create(plan).id for i in range(10)} - date2 = datetime.strptime("2016-06-13", "%Y-%m-%d") - timeutils.utcnow = mock.MagicMock() - timeutils.utcnow.return_value = date2 - checkpoints_date_2 = {collection.create(plan).id for i in range(10)} - self.assertEqual(set(collection.list_ids( - project_id=project_id, - provider_id=provider_id, - start_date=date1, - end_date=date1)), - checkpoints_date_1) - self.assertEqual(set(collection.list_ids( - project_id=project_id, - provider_id=provider_id, - start_date=date2, - end_date=date2)), - checkpoints_date_2) - - def test_list_checkpoints_by_date_with_all_tenants(self): - collection = self._create_test_collection() - date1 = datetime.strptime("2018-11-15", "%Y-%m-%d") - timeutils.utcnow = mock.MagicMock() - timeutils.utcnow.return_value = date1 - plan_1 = fake_protection_plan() - plan_1["id"] = "fake_plan_id_1" - plan_1["project_id"] = "fake_project_id_1" - provider_id_1 = plan_1['provider_id'] - checkpoints_1 = {collection.create(plan_1).id for i in range(10)} - 
- date2 = datetime.strptime("2018-11-17", "%Y-%m-%d") - timeutils.utcnow = mock.MagicMock() - timeutils.utcnow.return_value = date2 - plan_1["id"] = "fake_plan_id_2" - plan_1["project_id"] = "fake_project_id_2" - checkpoints_2 = {collection.create(plan_1).id for i in range(10)} - checkpoints_2.update(checkpoints_1) - self.assertEqual(set(collection.list_ids( - project_id="fake_project_id_1", provider_id=provider_id_1, - start_date=date1, - all_tenants=True)), checkpoints_2) - - def test_list_checkpoints_by_date_with_marker(self): - collection = self._create_test_collection() - date = datetime.strptime("2018-11-12", "%Y-%m-%d") - timeutils.utcnow = mock.MagicMock() - timeutils.utcnow.return_value = date - plan = fake_protection_plan() - plan["id"] = "fake_plan_id" - plan['provider_id'] = "fake_provider_id" - plan["project_id"] = "fake_project_id" - provider_id = plan['provider_id'] - checkpoints_plan = {collection.create(plan, { - 'checkpoint_id': i}).id for i in range(10)} - checkpoints_sorted = sorted(checkpoints_plan) - self.assertEqual(len(collection.list_ids( - project_id="fake_project_id", provider_id=provider_id, - start_date=date, - marker=checkpoints_sorted[0])) < 10, True) - - def test_delete_checkpoint(self): - collection = self._create_test_collection() - plan = fake_protection_plan() - provider_id = plan['provider_id'] - project_id = plan['project_id'] - result = {collection.create(plan).id for i in range(10)} - checkpoint = collection.get(result.pop()) - checkpoint.purge() - self.assertEqual(set(collection.list_ids( - project_id=project_id, provider_id=provider_id)), result) - - def test_write_checkpoint_with_invalid_lease(self): - collection = self._create_test_collection() - checkpoint = collection.create(fake_protection_plan()) - collection._bank_lease.check_lease_validity = mock.MagicMock() - collection._bank_lease.check_lease_validity.return_value = False - checkpoint.status = "finished" - self.assertNotEqual( - checkpoint.status, - collection.get(checkpoint_id=checkpoint.id).status, - ) diff --git a/karbor/tests/unit/protection/test_cinder_freezer_protection_plugin.py b/karbor/tests/unit/protection/test_cinder_freezer_protection_plugin.py deleted file mode 100644 index d9d50d8c..00000000 --- a/karbor/tests/unit/protection/test_cinder_freezer_protection_plugin.py +++ /dev/null @@ -1,209 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
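The date-filter tests above all pin the clock the same way; a minimal helper capturing that pattern, using mock.patch.object so timeutils.utcnow is restored afterwards (the tests assign a MagicMock directly and never restore it). `create_at` is an assumed name for illustration only.

from datetime import datetime
from unittest import mock

from oslo_utils import timeutils

def create_at(collection, plan, day, count=10):
    # Freeze timeutils.utcnow() so every checkpoint created here
    # lands in the same date bucket for list_ids(start_date=...).
    frozen = datetime.strptime(day, "%Y-%m-%d")
    with mock.patch.object(timeutils, 'utcnow', return_value=frozen):
        return {collection.create(plan).id for _ in range(count)}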
- -import collections -from unittest import mock - -from oslo_config import cfg -from oslo_config import fixture - -from karbor.common import constants -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection import client_factory -from karbor.services.protection.protection_plugins.volume import \ - volume_freezer_plugin_schemas -from karbor.services.protection.protection_plugins.volume.\ - volume_freezer_plugin import FreezerProtectionPlugin -from karbor.tests import base - - -class FakeBankPlugin(BankPlugin): - def update_object(self, key, value, context=None): - return - - def get_object(self, key, context=None): - return - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - return - - def delete_object(self, key, context=None): - return - - def get_owner_id(self, context=None): - return - - -fake_bank = Bank(FakeBankPlugin()) -fake_bank_section = BankSection(bank=fake_bank, section="fake") - -ResourceNode = collections.namedtuple( - "ResourceNode", - ["value", - "child_nodes"] -) - -Job = collections.namedtuple( - "Job", - ["job_schedule"] -) - - -def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs): - def noop(*args, **kwargs): - pass - - hooks = ( - 'on_prepare_begin', - 'on_prepare_finish', - 'on_main', - 'on_complete', - ) - for hook_name in hooks: - hook = getattr(operation, hook_name, noop) - hook(checkpoint, resource, context, parameters, **kwargs) - - -class FakeCheckpoint(object): - def __init__(self): - self.bank_section = fake_bank_section - self.id = "fake_id" - - def get_resource_bank_section(self, resource_id): - return self.bank_section - - -class VolumeFreezerProtectionPluginTest(base.TestCase): - def setUp(self): - super(VolumeFreezerProtectionPluginTest, self).setUp() - - plugin_config = cfg.ConfigOpts() - plugin_config_fixture = self.useFixture(fixture.Config(plugin_config)) - plugin_config_fixture.load_raw_values( - group='freezer_protection_plugin', - poll_interval=0, - ) - - self.plugin = FreezerProtectionPlugin(plugin_config) - self._public_url = 'http://127.0.0.1/v2.0' - cfg.CONF.set_default('freezer_endpoint', - self._public_url, - 'freezer_client') - # due to freezer client bug, auth_uri should be specified - cfg.CONF.set_default('auth_uri', - 'http://127.0.0.1/v2.0', - 'freezer_client') - self.cntxt = RequestContext(user_id='demo', - project_id='fake_project_id', - auth_token='fake_token') - - self.freezer_client = client_factory.ClientFactory.create_client( - 'freezer', self.cntxt - ) - self.checkpoint = FakeCheckpoint() - - def test_get_options_schema(self): - options_schema = self.plugin.get_options_schema( - constants.VOLUME_RESOURCE_TYPE) - self.assertEqual(options_schema, - volume_freezer_plugin_schemas.OPTIONS_SCHEMA) - - def test_get_restore_schema(self): - options_schema = self.plugin.get_restore_schema( - constants.VOLUME_RESOURCE_TYPE) - self.assertEqual(options_schema, - volume_freezer_plugin_schemas.RESTORE_SCHEMA) - - def test_get_saved_info_schema(self): - options_schema = self.plugin.get_saved_info_schema( - constants.VOLUME_RESOURCE_TYPE) - self.assertEqual(options_schema, - volume_freezer_plugin_schemas.SAVED_INFO_SCHEMA) - - @mock.patch('karbor.services.protection.protection_plugins.volume.' 
- 'volume_freezer_plugin.utils.status_poll') - @mock.patch('karbor.services.protection.clients.freezer.create') - def test_create_backup(self, mock_freezer_create, mock_status_poll): - resource = Resource(id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name='fake') - - fake_bank_section.update_object = mock.MagicMock() - protect_operation = self.plugin.get_protect_operation(resource) - mock_freezer_create.return_value = self.freezer_client - mock_status_poll.return_value = True - - self.freezer_client.clients.list = mock.MagicMock() - self.freezer_client.clients.list.return_value = [ - { - 'client_id': 'fake_client_id' - } - ] - - self.freezer_client.jobs.create = mock.MagicMock() - self.freezer_client.jobs.create.return_value = "123" - self.freezer_client.jobs.start_job = mock.MagicMock() - self.freezer_client.jobs.get = mock.MagicMock() - self.freezer_client.jobs.get.return_value = { - 'job_actions': [] - } - self.freezer_client.jobs.delete = mock.MagicMock() - call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.protection_plugins.volume.' - 'volume_freezer_plugin.utils.status_poll') - @mock.patch('karbor.services.protection.clients.freezer.create') - def test_delete_backup(self, mock_freezer_create, mock_status_poll): - resource = Resource(id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name='fake') - delete_operation = self.plugin.get_delete_operation(resource) - fake_bank_section.update_object = mock.MagicMock() - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = { - 'job_info': { - 'description': '123', - 'job_actions': [{ - 'freezer_action': { - 'backup_name': 'test', - 'action': 'backup', - 'mode': 'cinder', - 'cinder_vol_id': 'test', - 'storage': 'swift', - 'container': 'karbor/123' - } - }] - } - } - mock_freezer_create.return_value = self.freezer_client - mock_status_poll.return_value = True - self.freezer_client.jobs.create = mock.MagicMock() - self.freezer_client.jobs.create.return_value = '321' - self.freezer_client.jobs.start_job = mock.MagicMock() - self.freezer_client.jobs.get = mock.MagicMock() - self.freezer_client.jobs.get.return_value = { - 'job_actions': [] - } - self.freezer_client.jobs.delete = mock.MagicMock() - call_hooks(delete_operation, self.checkpoint, resource, self.cntxt, - {}) - - def test_get_supported_resources_types(self): - types = self.plugin.get_supported_resources_types() - self.assertEqual(types, - [constants.VOLUME_RESOURCE_TYPE]) diff --git a/karbor/tests/unit/protection/test_cinder_glance_plugin.py b/karbor/tests/unit/protection/test_cinder_glance_plugin.py deleted file mode 100644 index 68d7e855..00000000 --- a/karbor/tests/unit/protection/test_cinder_glance_plugin.py +++ /dev/null @@ -1,274 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
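For reference, the bank metadata shape the freezer tests above revolve around: the protect path stores the freezer job description, and the delete path reads it back and rebuilds a job from the same stored actions. All values below are the tests' fakes, not real freezer data.

job_info = {
    'description': '123',   # checkpoint id doubles as the job description
    'job_actions': [{
        'freezer_action': {
            'backup_name': 'test',
            'action': 'backup',
            'mode': 'cinder',
            'cinder_vol_id': 'test',
            'storage': 'swift',
            'container': 'karbor/123',
        },
    }],
}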
- -import collections -from unittest import mock - -from oslo_config import cfg -from oslo_config import fixture - -from karbor.common import constants -from karbor.context import RequestContext -from karbor import exception -from karbor.resource import Resource -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection import client_factory -from karbor.services.protection.protection_plugins.volume.\ - volume_glance_plugin import VolumeGlanceProtectionPlugin -from karbor.services.protection.protection_plugins.volume import \ - volume_glance_plugin_schemas -from karbor.tests import base - - -class FakeBankPlugin(BankPlugin): - def update_object(self, key, value, context=None): - return - - def get_object(self, key, context=None): - return - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - return - - def delete_object(self, key, context=None): - return - - def get_owner_id(self, context=None): - return - - -fake_bank = Bank(FakeBankPlugin()) -fake_bank_section = BankSection(bank=fake_bank, section="fake") - -ResourceNode = collections.namedtuple( - "ResourceNode", - ["value", - "child_nodes"] -) - - -Volume = collections.namedtuple( - "Volume", - ["id", "status", "size"] -) - -Snapshot = collections.namedtuple( - "Snapshot", - ["id", "status", "size"] -) - -Image = collections.namedtuple( - "Image", - ["disk_format", - "container_format", - "status", - "id"] -) - - -def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs): - def noop(*args, **kwargs): - pass - - hooks = ( - 'on_prepare_begin', - 'on_prepare_finish', - 'on_main', - 'on_complete', - ) - for hook_name in hooks: - hook = getattr(operation, hook_name, noop) - hook(checkpoint, resource, context, parameters, **kwargs) - - -class FakeCheckpoint(object): - def __init__(self): - super(FakeCheckpoint, self).__init__() - self.bank_section = fake_bank_section - - def get_resource_bank_section(self, resource_id=None): - return self.bank_section - - -class VolumeGlanceProtectionPluginTest(base.TestCase): - def setUp(self): - super(VolumeGlanceProtectionPluginTest, self).setUp() - - plugin_config = cfg.ConfigOpts() - plugin_config_fixture = self.useFixture(fixture.Config(plugin_config)) - plugin_config_fixture.load_raw_values( - group='volume_glance_plugin', - poll_interval=0, - backup_image_object_size=65536 - ) - self.plugin = VolumeGlanceProtectionPlugin(plugin_config) - cfg.CONF.set_default('glance_endpoint', - 'http://127.0.0.1:9292', - 'glance_client') - - cfg.CONF.set_default('cinder_endpoint', - 'http://127.0.0.1:8774/v2.1', - 'cinder_client') - self.cntxt = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh' - ) - self.cinder_client = client_factory.ClientFactory.create_client( - "cinder", self.cntxt) - self.glance_client = client_factory.ClientFactory.create_client( - "glance", self.cntxt) - self.checkpoint = FakeCheckpoint() - - def test_get_options_schema(self): - options_schema = self.plugin.get_options_schema( - constants.VOLUME_RESOURCE_TYPE) - self.assertEqual(options_schema, - volume_glance_plugin_schemas.OPTIONS_SCHEMA) - - def test_get_restore_schema(self): - options_schema = self.plugin.get_restore_schema( - constants.VOLUME_RESOURCE_TYPE) - self.assertEqual(options_schema, - volume_glance_plugin_schemas.RESTORE_SCHEMA) - - def test_get_saved_info_schema(self): - 
options_schema = self.plugin.get_saved_info_schema( - constants.VOLUME_RESOURCE_TYPE) - self.assertEqual(options_schema, - volume_glance_plugin_schemas.SAVED_INFO_SCHEMA) - - @mock.patch('karbor.services.protection.protection_plugins' - '.utils.status_poll') - @mock.patch('karbor.services.protection.clients.glance.create') - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_create_backup(self, mock_cinder_create, - mock_glance_create, mock_status_poll): - resource = Resource(id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name='fake') - - fake_bank_section.update_object = mock.MagicMock() - - protect_operation = self.plugin.get_protect_operation(resource) - mock_cinder_create.return_value = self.cinder_client - mock_glance_create.return_value = self.glance_client - mock_status_poll.return_value = True - self.cinder_client.volume_snapshots.create = mock.MagicMock() - self.cinder_client.volume_snapshots.create.return_value = Snapshot( - id="1234", - status="available", - size='100000000' - ) - self.cinder_client.volume_snapshots.get = mock.MagicMock() - self.cinder_client.volume_snapshots.get.return_value = Snapshot( - id="1234", - status="available", - size='100000000' - ) - self.cinder_client.volumes.create = mock.MagicMock() - self.cinder_client.volumes.create.return_value = Volume( - id='2345', - status='available', - size=1 - ) - self.cinder_client.volumes.get = mock.MagicMock() - self.cinder_client.volumes.get.return_value = Volume( - id='2345', - status='available', - size=1 - ) - self.cinder_client.volumes.upload_to_image = mock.MagicMock() - self.cinder_client.volumes.upload_to_image.return_value = [202, { - 'os-volume_upload_image': { - 'image_id': "3456" - } - }] - - self.glance_client.images.get = mock.MagicMock() - self.glance_client.images.return_value = Image( - disk_format="raw", - container_format="bare", - status="active", - id="3456" - ) - fake_bank_section.update_object = mock.MagicMock() - self.glance_client.images.data = mock.MagicMock() - self.glance_client.images.data.return_value = [] - mock_status_poll.return_value = True - - self.cinder_client.volume_snapshots.delete = mock.MagicMock() - self.cinder_client.volumes.delete = mock.MagicMock() - self.glance_client.images.delete = mock.MagicMock() - - call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, - {}) - self.cinder_client.volumes.upload_to_image.assert_called_with( - volume=Volume(id='2345', status='available', size=1), - force=True, - image_name='temporary_image_of_2345', - container_format="bare", - disk_format="raw", - visibility="private", - protected=False - ) - - def test_delete_backup(self): - resource = Resource(id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name='fake') - - fake_bank_section.list_objects = mock.MagicMock() - fake_bank_section.list_objects.return_value = ["data_1", "data_2"] - fake_bank_section.delete_object = mock.MagicMock() - delete_operation = self.plugin.get_delete_operation(resource) - call_hooks(delete_operation, self.checkpoint, resource, self.cntxt, - {}) - - def test_get_supported_resources_types(self): - types = self.plugin.get_supported_resources_types() - self.assertEqual([constants.VOLUME_RESOURCE_TYPE], types) - - @mock.patch('karbor.services.protection.protection_plugins.utils.' 
- 'update_resource_verify_result') - def test_verify_backup(self, mock_update_verify): - resource = Resource(id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name='fake') - - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = 'available' - - verify_operation = self.plugin.get_verify_operation(resource) - call_hooks(verify_operation, self.checkpoint, resource, self.cntxt, - {}) - mock_update_verify.assert_called_with( - None, resource.type, resource.id, 'available') - - @mock.patch('karbor.services.protection.protection_plugins.utils.' - 'update_resource_verify_result') - def test_verify_backup_with_error_status(self, mock_update_verify): - resource = Resource(id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name='fake') - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = 'error' - - verify_operation = self.plugin.get_verify_operation(resource) - self.assertRaises( - exception.VerifyResourceFailed, call_hooks, verify_operation, - self.checkpoint, resource, self.cntxt, {}) - mock_update_verify.assert_called_with( - None, resource.type, resource.id, 'error', - 'The status of volume backup status is error.') diff --git a/karbor/tests/unit/protection/test_cinder_protection_plugin.py b/karbor/tests/unit/protection/test_cinder_protection_plugin.py deleted file mode 100644 index 22e51171..00000000 --- a/karbor/tests/unit/protection/test_cinder_protection_plugin.py +++ /dev/null @@ -1,470 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -from unittest import mock - -from cinderclient import exceptions as cinder_exc -from oslo_config import cfg -from oslo_config import fixture - -from karbor.common import constants -from karbor.context import RequestContext -from karbor import exception -from karbor.resource import Resource -from karbor.services.protection import bank_plugin -from karbor.services.protection import client_factory -from karbor.services.protection.protection_plugins.volume. 
\ - cinder_protection_plugin import CinderBackupProtectionPlugin -from karbor.services.protection.protection_plugins.volume \ - import volume_plugin_cinder_schemas as cinder_schemas -from karbor.tests import base -from karbor.tests.unit.protection import fakes - - -ResourceNode = collections.namedtuple( - "ResourceNode", - ["value", - "child_nodes"] -) - -Image = collections.namedtuple( - "Image", - ["disk_format", - "container_format", - "status"] -) - - -def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs): - def noop(*args, **kwargs): - pass - - hooks = ( - 'on_prepare_begin', - 'on_prepare_finish', - 'on_main', - 'on_complete', - ) - for hook_name in hooks: - hook = getattr(operation, hook_name, noop) - hook(checkpoint, resource, context, parameters, **kwargs) - - -class FakeCheckpoint(object): - def __init__(self, section): - super(FakeCheckpoint, self).__init__() - self.bank_section = section - self.id = "fake_id" - - def get_resource_bank_section(self, resource_id=None): - return self.bank_section - - -class BackupResponse(object): - def __init__(self, bkup_id, final_status, working_status, time_to_work): - super(BackupResponse, self).__init__() - self._final_status = final_status - self._working_status = working_status - self._time_to_work = time_to_work - self._id = bkup_id - - def __call__(self, *args, **kwargs): - res = mock.Mock() - res.id = self._id - if self._time_to_work > 0: - self._time_to_work -= 1 - res.status = self._working_status - else: - res.status = self._final_status - if res.status == 'not-found': - raise cinder_exc.NotFound(403) - return res - - -class RestoreResponse(object): - def __init__(self, volume_id, raise_except=False): - self._volume_id = volume_id - self._raise_except = raise_except - - def __call__(self, *args, **kwargs): - if self._raise_except: - raise exception.KarborException() - - res = mock.Mock() - res.volume_id = self._volume_id - return res - - -class CinderProtectionPluginTest(base.TestCase): - def setUp(self): - super(CinderProtectionPluginTest, self).setUp() - plugin_config = cfg.ConfigOpts() - plugin_config_fixture = self.useFixture(fixture.Config(plugin_config)) - plugin_config_fixture.load_raw_values( - group='cinder_backup_protection_plugin', - poll_interval=0, - ) - self.plugin = CinderBackupProtectionPlugin(plugin_config) - cfg.CONF.set_default('cinder_endpoint', - 'http://127.0.0.1:8776/v2', - 'cinder_client') - - self.cntxt = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh') - self.cinder_client = client_factory.ClientFactory.create_client( - "cinder", self.cntxt) - - def _get_checkpoint(self): - fake_bank = bank_plugin.Bank(fakes.FakeBankPlugin()) - fake_bank_section = bank_plugin.BankSection( - bank=fake_bank, - section="fake" - ) - return FakeCheckpoint(fake_bank_section) - - def test_get_options_schema(self): - options_schema = self.plugin.get_options_schema( - 'OS::Cinder::Volume') - self.assertEqual(options_schema, cinder_schemas.OPTIONS_SCHEMA) - - def test_get_restore_schema(self): - options_schema = self.plugin.get_restore_schema( - 'OS::Cinder::Volume') - self.assertEqual(options_schema, cinder_schemas.RESTORE_SCHEMA) - - def test_get_saved_info_schema(self): - options_schema = self.plugin.get_saved_info_schema( - 'OS::Cinder::Volume') - self.assertEqual(options_schema, - cinder_schemas.SAVED_INFO_SCHEMA) - - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_protect_succeed(self, mock_cinder_create): - resource = Resource( - id="123", - 
type=constants.VOLUME_RESOURCE_TYPE, - name="test", - ) - checkpoint = self._get_checkpoint() - section = checkpoint.get_resource_bank_section() - operation = self.plugin.get_protect_operation(resource) - section.update_object = mock.MagicMock() - mock_cinder_create.return_value = self.cinder_client - with mock.patch.multiple( - self.cinder_client, - volumes=mock.DEFAULT, - backups=mock.DEFAULT, - volume_snapshots=mock.DEFAULT, - ) as mocks: - mocks['volumes'].get.return_value = mock.Mock() - mocks['volumes'].get.return_value.status = 'available' - mocks['backups'].create = BackupResponse( - '456', 'creating', '---', 0) - mocks['backups'].get = BackupResponse( - '456', 'available', 'creating', 2) - mocks['volume_snapshots'].get.return_value = BackupResponse( - '789', 'creating', '---', 0) - mocks['volume_snapshots'].get = BackupResponse( - '789', 'available', 'creating', 2) - call_hooks(operation, checkpoint, resource, self.cntxt, {}) - - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_protect_fail_backup(self, mock_cinder_create): - resource = Resource( - id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name="test", - ) - checkpoint = self._get_checkpoint() - operation = self.plugin.get_protect_operation(resource) - mock_cinder_create.return_value = self.cinder_client - with mock.patch.multiple( - self.cinder_client, - volumes=mock.DEFAULT, - backups=mock.DEFAULT, - volume_snapshots=mock.DEFAULT, - ) as mocks: - mocks['volumes'].get.return_value = mock.Mock() - mocks['volumes'].get.return_value.status = 'available' - mocks['backups'].backups.create = BackupResponse( - '456', 'creating', '---', 0) - mocks['backups'].backups.get = BackupResponse( - '456', 'error', 'creating', 2) - mocks['volume_snapshots'].get.return_value = BackupResponse( - '789', 'creating', '---', 0) - mocks['volume_snapshots'].get = BackupResponse( - '789', 'available', 'creating', 2) - self.assertRaises( - exception.CreateResourceFailed, - call_hooks, - operation, - checkpoint, - resource, - self.cntxt, - {} - ) - - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_protect_fail_snapshot(self, mock_cinder_create): - resource = Resource( - id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name="test", - ) - checkpoint = self._get_checkpoint() - operation = self.plugin.get_protect_operation(resource) - mock_cinder_create.return_value = self.cinder_client - with mock.patch.multiple( - self.cinder_client, - volumes=mock.DEFAULT, - backups=mock.DEFAULT, - volume_snapshots=mock.DEFAULT, - ) as mocks: - mocks['volumes'].get.return_value = mock.Mock() - mocks['volumes'].get.return_value.status = 'available' - mocks['backups'].backups.create = BackupResponse( - '456', 'creating', '---', 0) - mocks['backups'].backups.get = BackupResponse( - '456', 'available', 'creating', 2) - mocks['volume_snapshots'].get.return_value = BackupResponse( - '789', 'creating', '---', 0) - mocks['volume_snapshots'].get = BackupResponse( - '789', 'error', 'creating', 2) - self.assertRaises( - exception.CreateResourceFailed, - call_hooks, - operation, - checkpoint, - resource, - self.cntxt, - {} - ) - - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_protect_fail_volume(self, mock_cinder_create): - resource = Resource( - id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name="test", - ) - checkpoint = self._get_checkpoint() - operation = self.plugin.get_protect_operation(resource) - mock_cinder_create.return_value = self.cinder_client - with mock.patch.multiple( - 
self.cinder_client, - volumes=mock.DEFAULT, - backups=mock.DEFAULT, - volume_snapshots=mock.DEFAULT, - ) as mocks: - mocks['volumes'].get.return_value = mock.Mock() - mocks['volumes'].get.return_value.status = 'error' - mocks['backups'].create = BackupResponse( - '456', 'creating', '---', 0) - mocks['backups'].get = BackupResponse( - '456', 'error', 'creating', 2) - mocks['volume_snapshots'].get.return_value = BackupResponse( - '789', 'creating', '---', 0) - mocks['volume_snapshots'].get = BackupResponse( - '789', 'available', 'creating', 2) - self.assertRaises( - exception.CreateResourceFailed, - call_hooks, - operation, - checkpoint, - resource, - self.cntxt, - {} - ) - - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_delete_succeed(self, mock_cinder_create): - resource = Resource( - id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name="test", - ) - checkpoint = self._get_checkpoint() - section = checkpoint.get_resource_bank_section() - section.update_object('metadata', { - 'backup_id': '456', - }) - operation = self.plugin.get_delete_operation(resource) - mock_cinder_create.return_value = self.cinder_client - with mock.patch.object(self.cinder_client, 'backups') as backups: - backups.delete = BackupResponse('456', 'deleting', '---', 0) - backups.get = BackupResponse('456', 'not-found', 'deleting', 2) - call_hooks(operation, checkpoint, resource, self.cntxt, {}) - - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_delete_fail(self, mock_cinder_create): - resource = Resource( - id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name="test", - ) - checkpoint = self._get_checkpoint() - section = checkpoint.get_resource_bank_section() - section.update_object('metadata', { - 'backup_id': '456', - }) - operation = self.plugin.get_delete_operation(resource) - mock_cinder_create.return_value = self.cinder_client - with mock.patch.object(self.cinder_client, 'backups') as backups: - backups.delete = BackupResponse('456', 'deleting', '---', 0) - backups.get = BackupResponse('456', 'error', 'deleting', 2) - self.assertRaises( - exception.DeleteResourceFailed, - call_hooks, - operation, - checkpoint, - resource, - self.cntxt, - {} - ) - - @mock.patch('karbor.services.protection.clients.cinder.create') - @mock.patch('karbor.services.protection.protection_plugins.utils.'
- 'update_resource_restore_result') - def test_restore_succeed(self, mock_update_restore, mock_cinder_create): - resource = Resource( - id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name="fake", - ) - checkpoint = self._get_checkpoint() - section = checkpoint.get_resource_bank_section() - section.update_object('metadata', { - 'backup_id': '456', - }) - - parameters = { - "restore_name": "karbor restore volume", - "restore_description": "karbor restore", - } - - operation = self.plugin.get_restore_operation(resource) - mock_cinder_create.return_value = self.cinder_client - with mock.patch.multiple( - self.cinder_client, - volumes=mock.DEFAULT, - restores=mock.DEFAULT, - ) as mocks: - volume_id = 456 - mocks['volumes'].get.return_value = mock.Mock() - mocks['volumes'].get.return_value.status = 'available' - mocks['restores'].restore = RestoreResponse(volume_id) - call_hooks(operation, checkpoint, resource, self.cntxt, parameters, - **{'restore': None, 'new_resources': {}}) - mocks['volumes'].update.assert_called_with( - volume_id, - **{'name': parameters['restore_name'], - 'description': parameters['restore_description']}) - mock_update_restore.assert_called_with( - None, resource.type, volume_id, 'available') - - @mock.patch('karbor.services.protection.clients.cinder.create') - @mock.patch('karbor.services.protection.protection_plugins.utils.' - 'update_resource_verify_result') - def test_verify_succeed(self, mock_update_verify, mock_cinder_create): - resource = Resource( - id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name="fake", - ) - checkpoint = self._get_checkpoint() - section = checkpoint.get_resource_bank_section() - section.update_object('metadata', { - 'backup_id': '456', - }) - parameters = {} - - operation = self.plugin.get_verify_operation(resource) - mock_cinder_create.return_value = self.cinder_client - with mock.patch.multiple( - self.cinder_client, - backups=mock.DEFAULT, - volumes=mock.DEFAULT, - ) as mocks: - volume_id = '123' - mocks['backups'].get.return_value = mock.Mock() - mocks['backups'].get.return_value.status = 'available' - call_hooks(operation, checkpoint, resource, self.cntxt, parameters, - **{'verify': None, 'new_resources': {}}) - mock_update_verify.assert_called_with( - None, resource.type, volume_id, 'available') - - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_restore_fail_volume_0(self, mock_cinder_create): - resource = Resource( - id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name="fake", - ) - checkpoint = self._get_checkpoint() - section = checkpoint.get_resource_bank_section() - section.update_object('metadata', { - 'backup_id': '456', - }) - - operation = self.plugin.get_restore_operation(resource) - mock_cinder_create.return_value = self.cinder_client - with mock.patch.multiple( - self.cinder_client, - restores=mock.DEFAULT, - ) as mocks: - mocks['restores'].restore = RestoreResponse(0, True) - self.assertRaises( - exception.KarborException, call_hooks, - operation, checkpoint, resource, self.cntxt, - {}, **{'restore': None}) - - @mock.patch('karbor.services.protection.clients.cinder.create') - @mock.patch('karbor.services.protection.protection_plugins.utils.' 
- 'update_resource_restore_result') - def test_restore_fail_volume_1(self, mock_update_restore, - mock_cinder_create): - resource = Resource( - id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name="fake", - ) - checkpoint = self._get_checkpoint() - section = checkpoint.get_resource_bank_section() - section.update_object('metadata', { - 'backup_id': '456', - }) - - operation = self.plugin.get_restore_operation(resource) - mock_cinder_create.return_value = self.cinder_client - with mock.patch.multiple( - self.cinder_client, - volumes=mock.DEFAULT, - restores=mock.DEFAULT, - ) as mocks: - volume_id = 456 - mocks['volumes'].get.return_value = mock.Mock() - mocks['volumes'].get.return_value.status = 'error' - mocks['restores'].restore = RestoreResponse(volume_id) - self.assertRaises( - exception.RestoreResourceFailed, call_hooks, - operation, checkpoint, resource, self.cntxt, - {}, **{'restore': None}) - - mock_update_restore.assert_called_with( - None, resource.type, volume_id, - constants.RESOURCE_STATUS_ERROR, 'Error creating volume') - - def test_get_supported_resources_types(self): - types = self.plugin.get_supported_resources_types() - self.assertEqual([constants.VOLUME_RESOURCE_TYPE], types) diff --git a/karbor/tests/unit/protection/test_cinder_snapshot_protection_plugin.py b/karbor/tests/unit/protection/test_cinder_snapshot_protection_plugin.py deleted file mode 100644 index 6a6caf0a..00000000 --- a/karbor/tests/unit/protection/test_cinder_snapshot_protection_plugin.py +++ /dev/null @@ -1,234 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -from unittest import mock - -from oslo_config import cfg -from oslo_config import fixture - -from karbor.common import constants -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection import client_factory -from karbor.services.protection.protection_plugins. 
\ - volume.volume_snapshot_plugin import VolumeSnapshotProtectionPlugin -from karbor.services.protection.protection_plugins.volume \ - import volume_snapshot_plugin_schemas -from karbor.tests import base - - -class FakeBankPlugin(BankPlugin): - def update_object(self, key, value, context=None): - return - - def get_object(self, key, context=None): - return - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - return - - def delete_object(self, key, context=None): - return - - def get_owner_id(self, context=None): - return - - -fake_bank = Bank(FakeBankPlugin()) -fake_bank_section = BankSection(bank=fake_bank, - section="fake") - -ResourceNode = collections.namedtuple( - "ResourceNode", - ["value", - "child_nodes"] -) - -Volume = collections.namedtuple( - "Volume", - ["status"] -) - -Snapshot = collections.namedtuple( - "Snapshot", - ["id", "status"] -) - - -def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs): - def noop(*args, **kwargs): - pass - - hooks = ( - 'on_prepare_begin', - 'on_prepare_finish', - 'on_main', - 'on_complete', - ) - for hook_name in hooks: - hook = getattr(operation, hook_name, noop) - hook(checkpoint, resource, context, parameters, **kwargs) - - -class FakeCheckpoint(object): - def __init__(self): - self.bank_section = fake_bank_section - - def get_resource_bank_section(self, resource_id): - return self.bank_section - - -class CinderSnapshotProtectionPluginTest(base.TestCase): - def setUp(self): - super(CinderSnapshotProtectionPluginTest, self).setUp() - - plugin_config = cfg.ConfigOpts() - plugin_config_fixture = self.useFixture(fixture.Config(plugin_config)) - plugin_config_fixture.load_raw_values( - group='volume_snapshot_plugin', - poll_interval=0, - ) - - self.plugin = VolumeSnapshotProtectionPlugin(plugin_config) - - cfg.CONF.set_default('cinder_endpoint', - 'http://127.0.0.1:8774/v2.1', - 'cinder_client') - service_catalog = [ - {'type': 'volumev3', - 'endpoints': [{'publicURL': 'http://127.0.0.1:8774/v2.1/abcd'}], - }, - ] - self.cntxt = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh', - service_catalog=service_catalog) - self.cinder_client = client_factory.ClientFactory.create_client( - "cinder", self.cntxt) - self.checkpoint = FakeCheckpoint() - - def test_get_options_schema(self): - options_schema = self.plugin.get_options_schema( - constants.VOLUME_RESOURCE_TYPE) - self.assertEqual(options_schema, - volume_snapshot_plugin_schemas.OPTIONS_SCHEMA) - - def test_get_restore_schema(self): - options_schema = self.plugin.get_restore_schema( - constants.VOLUME_RESOURCE_TYPE) - self.assertEqual(options_schema, - volume_snapshot_plugin_schemas.RESTORE_SCHEMA) - - def test_get_saved_info_schema(self): - options_schema = self.plugin.get_saved_info_schema( - constants.VOLUME_RESOURCE_TYPE) - self.assertEqual(options_schema, - volume_snapshot_plugin_schemas.SAVED_INFO_SCHEMA) - - @mock.patch('karbor.services.protection.protection_plugins.' 
- 'utils.status_poll') - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_create_snapshot(self, mock_cinder_create, mock_status_poll): - resource = Resource(id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name='fake') - - fake_bank_section.update_object = mock.MagicMock() - - protect_operation = self.plugin.get_protect_operation(resource) - mock_cinder_create.return_value = self.cinder_client - - self.cinder_client.volumes.get = mock.MagicMock() - self.cinder_client.volumes.get.return_value = Volume( - status="available" - ) - fake_bank_section.update_object = mock.MagicMock() - self.cinder_client.volume_snapshots.create = mock.MagicMock() - self.cinder_client.volume_snapshots.create.return_value = Snapshot( - id="1234", - status="available" - ) - self.cinder_client.volume_snapshots.get = mock.MagicMock() - self.cinder_client.volume_snapshots.get.return_value = Snapshot( - id="1234", - status="available" - ) - mock_status_poll.return_value = True - call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.protection_plugins.' - 'utils.status_poll') - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_delete_snapshot(self, mock_cinder_create, mock_status_poll): - resource = Resource(id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name='fake') - mock_cinder_create.return_value = self.cinder_client - self.cinder_client.volume_snapshots.get = mock.MagicMock() - self.cinder_client.volume_snapshots.get.return_value = Snapshot( - id="1234", - status="available" - ) - self.cinder_client.volume_snapshots.delete = mock.MagicMock() - - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = { - "snapshot_id": "1234"} - - mock_status_poll.return_value = True - delete_operation = self.plugin.get_delete_operation(resource) - call_hooks(delete_operation, self.checkpoint, resource, self.cntxt, - {}) - - def test_get_supported_resources_types(self): - types = self.plugin.get_supported_resources_types() - self.assertEqual([constants.VOLUME_RESOURCE_TYPE], types) - - @mock.patch('karbor.services.protection.clients.cinder.create') - @mock.patch('karbor.services.protection.protection_plugins.utils.'
- 'update_resource_verify_result') - def test_verify_succeed(self, mock_update_verify, mock_cinder_create): - resource = Resource( - id="123", - type=constants.VOLUME_RESOURCE_TYPE, - name="fake", - ) - checkpoint = self.checkpoint - section = checkpoint.get_resource_bank_section(resource) - section.update_object('metadata', { - 'snapshot_id': '456', - }) - parameters = {} - - operation = self.plugin.get_verify_operation(resource) - mock_cinder_create.return_value = self.cinder_client - with mock.patch.multiple( - self.cinder_client, - volume_snapshots=mock.DEFAULT, - volumes=mock.DEFAULT, - ) as mocks: - volume_id = '123' - mocks['volume_snapshots'].get.return_value = mock.Mock() - mocks['volume_snapshots'].get.return_value.status = 'available' - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = { - "snapshot_id": "456"} - call_hooks(operation, checkpoint, resource, self.cntxt, parameters, - **{'verify': None, 'new_resources': {}}) - mock_update_verify.assert_called_with( - None, resource.type, volume_id, 'available') diff --git a/karbor/tests/unit/protection/test_client_factory.py b/karbor/tests/unit/protection/test_client_factory.py deleted file mode 100644 index f9a746a8..00000000 --- a/karbor/tests/unit/protection/test_client_factory.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from karbor.services.protection.client_factory import ClientFactory -from karbor.tests import base - - -class ClientFactoryTest(base.TestCase): - - @mock.patch('oslo_utils.importutils.import_module') - def test_get_client_module_with_import_error(self, mock_import_module): - mock_import_module.side_effect = ImportError - ClientFactory._factory = None - client_module = ClientFactory.get_client_module('cinder') - self.assertEqual(None, client_module) diff --git a/karbor/tests/unit/protection/test_database_protection_plugin.py b/karbor/tests/unit/protection/test_database_protection_plugin.py deleted file mode 100644 index 6073cd04..00000000 --- a/karbor/tests/unit/protection/test_database_protection_plugin.py +++ /dev/null @@ -1,224 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
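The plugin tests in these files configure poll_interval=0 in their oslo.config fixtures and patch utils.status_poll, so the real polling helper never runs during a unit test. For context, a status-poll helper of the kind being bypassed can be sketched as follows; the function name matches the patched target, but the exact signature and the set-based arguments are assumptions inferred from how the tests and fakes behave, not a verbatim copy of karbor's utils module:

    import time

    def status_poll(get_status_func, interval, success_statuses,
                    failure_statuses, ignore_statuses=frozenset()):
        # Poll until a terminal status is observed. A status in
        # ignore_statuses means "still in progress"; a status in none
        # of the sets is treated as an unexpected failure.
        while True:
            status = get_status_func()
            if status in success_statuses:
                return True
            if status in failure_statuses:
                return False
            if status not in ignore_statuses:
                return False
            time.sleep(interval)

With interval set to 0 the loop never sleeps, which is why the fixtures use poll_interval=0: a fake such as BackupResponse can step from 'creating' to 'available' over a few iterations without slowing the test run.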
- -import collections -from unittest import mock - -from oslo_config import cfg -from oslo_config import fixture - -from karbor.common import constants -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection import client_factory -from karbor.services.protection.protection_plugins. \ - database.database_backup_plugin import DatabaseBackupProtectionPlugin -from karbor.services.protection.protection_plugins.database \ - import database_backup_plugin_schemas -from karbor.tests import base - - -class FakeBankPlugin(BankPlugin): - def update_object(self, key, value, context=None): - return - - def get_object(self, key, context=None): - return - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - return - - def delete_object(self, key, context=None): - return - - def get_owner_id(self, context=None): - return - - -fake_bank = Bank(FakeBankPlugin()) -fake_bank_section = BankSection(bank=fake_bank, section="fake") - -ResourceNode = collections.namedtuple( - "ResourceNode", - ["value", - "child_nodes"] -) - -Database = collections.namedtuple( - "Database", - ["status"] -) - -Backup = collections.namedtuple( - "Backup", - ["id", "status"] -) - - -def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs): - def noop(*args, **kwargs): - pass - - hooks = ( - 'on_prepare_begin', - 'on_prepare_finish', - 'on_main', - 'on_complete', - ) - for hook_name in hooks: - hook = getattr(operation, hook_name, noop) - hook(checkpoint, resource, context, parameters, **kwargs) - - -class FakeCheckpoint(object): - def __init__(self): - self.bank_section = fake_bank_section - - def get_resource_bank_section(self, resource_id): - return self.bank_section - - -class TroveProtectionPluginTest(base.TestCase): - def setUp(self): - super(TroveProtectionPluginTest, self).setUp() - - plugin_config = cfg.ConfigOpts() - plugin_config_fixture = self.useFixture(fixture.Config(plugin_config)) - plugin_config_fixture.load_raw_values( - group='database_backup_plugin', - poll_interval=0, - ) - - self.plugin = DatabaseBackupProtectionPlugin(plugin_config) - - cfg.CONF.set_default('trove_endpoint', - 'http://127.0.0.1:8774/v2.1', - 'trove_client') - service_catalog = [ - {'type': 'database', - 'endpoints': [{'publicURL': 'http://127.0.0.1:8774/v2.1/abcd'}], - }, - ] - self.cntxt = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh', - service_catalog=service_catalog) - self.trove_client = client_factory.ClientFactory.create_client( - "trove", self.cntxt) - self.checkpoint = FakeCheckpoint() - - def test_get_options_schema(self): - options_schema = self.plugin.get_options_schema( - constants.DATABASE_RESOURCE_TYPE) - self.assertEqual(options_schema, - database_backup_plugin_schemas.OPTIONS_SCHEMA) - - def test_get_restore_schema(self): - options_schema = self.plugin.get_restore_schema( - constants.DATABASE_RESOURCE_TYPE) - self.assertEqual(options_schema, - database_backup_plugin_schemas.RESTORE_SCHEMA) - - def test_get_saved_info_schema(self): - options_schema = self.plugin.get_saved_info_schema( - constants.DATABASE_RESOURCE_TYPE) - self.assertEqual(options_schema, - database_backup_plugin_schemas.SAVED_INFO_SCHEMA) - - @mock.patch('karbor.services.protection.protection_plugins.database.' 
- 'database_backup_plugin.utils.status_poll') - @mock.patch('karbor.services.protection.clients.trove.create') - def test_create_backup(self, mock_trove_create, mock_status_poll): - resource = Resource(id="123", - type=constants.DATABASE_RESOURCE_TYPE, - name='fake') - - fake_bank_section.update_object = mock.MagicMock() - - protect_operation = self.plugin.get_protect_operation(resource) - mock_trove_create.return_value = self.trove_client - - self.trove_client.instances.get = mock.MagicMock() - self.trove_client.instances.get.return_value = Database( - status="ACTIVE" - ) - fake_bank_section.update_object = mock.MagicMock() - self.trove_client.backups.create = mock.MagicMock() - self.trove_client.backups.create.return_value = Backup( - id="1234", - status="COMPLETED" - ) - self.trove_client.backups.get = mock.MagicMock() - self.trove_client.backups.get.return_value = Backup( - id="1234", - status="COMPLETED" - ) - mock_status_poll.return_value = True - call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.protection_plugins.database.' - 'database_backup_plugin.utils.status_poll') - @mock.patch('karbor.services.protection.clients.trove.create') - def test_delete_backup(self, mock_trove_create, mock_status_poll): - resource = Resource(id="123", - type=constants.DATABASE_RESOURCE_TYPE, - name='fake') - mock_trove_create.return_value = self.trove_client - self.trove_client.backups.get = mock.MagicMock() - self.trove_client.backups.get.return_value = Backup( - id="1234", - status="COMPLETED" - ) - self.trove_client.backups.delete = mock.MagicMock() - - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = { - "backup_id": "1234"} - - mock_status_poll.return_value = True - delete_operation = self.plugin.get_delete_operation(resource) - call_hooks(delete_operation, self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.protection_plugins.utils.' - 'update_resource_verify_result') - @mock.patch('karbor.services.protection.clients.trove.create') - def test_verify_backup(self, mock_trove_create, mock_update_verify): - resource = Resource(id="123", - type=constants.DATABASE_RESOURCE_TYPE, - name='fake') - mock_trove_create.return_value = self.trove_client - self.trove_client.backups.get = mock.MagicMock() - self.trove_client.backups.get.return_value = Backup( - id="1234", - status="COMPLETED" - ) - - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = { - "backup_id": "1234"} - - verify_operation = self.plugin.get_verify_operation(resource) - call_hooks(verify_operation, self.checkpoint, resource, self.cntxt, - {}) - mock_update_verify.assert_called_with( - None, resource.type, resource.id, 'available') - - def test_get_supported_resources_types(self): - types = self.plugin.get_supported_resources_types() - self.assertEqual(types, - [constants.DATABASE_RESOURCE_TYPE]) diff --git a/karbor/tests/unit/protection/test_file_system_bank_plugin.py b/karbor/tests/unit/protection/test_file_system_bank_plugin.py deleted file mode 100644 index f07c76ac..00000000 --- a/karbor/tests/unit/protection/test_file_system_bank_plugin.py +++ /dev/null @@ -1,92 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import tempfile - -from oslo_config import cfg -from oslo_config import fixture -from oslo_utils import importutils - -from karbor import exception -from karbor.tests import base - - -class FileSystemBankPluginTest(base.TestCase): - def setUp(self): - super(FileSystemBankPluginTest, self).setUp() - - import_str = ( - "karbor.services.protection.bank_plugins." - "file_system_bank_plugin.FileSystemBankPlugin" - ) - plugin_config = cfg.ConfigOpts() - plugin_config_fixture = self.useFixture(fixture.Config(plugin_config)) - plugin_config_fixture.load_raw_values( - group='file_system_bank_plugin', - file_system_bank_path=tempfile.mkdtemp(), - ) - fs_bank_plugin_cls = importutils.import_class( - import_str=import_str) - self.fs_bank_plugin = fs_bank_plugin_cls(plugin_config) - - def test_delete_object(self): - self.fs_bank_plugin.update_object("/key", "value") - self.fs_bank_plugin.delete_object("/key") - object_file = ( - self.fs_bank_plugin.object_container_path + "/key") - self.assertEqual(False, os.path.isfile(object_file)) - - def test_get_object(self): - self.fs_bank_plugin.update_object("/key", "value") - value = self.fs_bank_plugin.get_object("/key") - self.assertEqual("value", value) - - def test_list_objects(self): - self.fs_bank_plugin.update_object("/list/key-1", "value-1") - self.fs_bank_plugin.update_object("/list/key-2", "value-2") - objects = self.fs_bank_plugin.list_objects(prefix="/list/") - self.assertEqual(2, len(objects)) - self.assertIn('/list/key-1', objects) - self.assertIn('/list/key-2', objects) - - def test_list_objects_with_contain_sub_dir(self): - self.fs_bank_plugin.update_object("/list/key-1", "value-1") - self.fs_bank_plugin.update_object("/list/sub/key-2", "value-2") - self.fs_bank_plugin.update_object("/list/sub/key-3", "value-3") - objects = self.fs_bank_plugin.list_objects(prefix="/list/") - self.assertEqual(3, len(objects)) - self.assertIn("/list/key-1", objects) - self.assertIn("/list/sub/key-2", objects) - self.assertIn("/list/sub/key-3", objects) - - def test_update_object(self): - self.fs_bank_plugin.update_object("/key-1", "value-1") - self.fs_bank_plugin.update_object("/key-1", "value-2") - object_file = ( - self.fs_bank_plugin.object_container_path + "/key-1") - with open(object_file, "r") as f: - contents = f.read() - self.assertEqual("value-2", contents) - - def test_update_object_with_invaild_path(self): - self.assertRaises(exception.InvalidInput, - self.fs_bank_plugin.update_object, - "../../../../../../../etc/shadow", - "value-1") - - def test_create_get_dict_object(self): - self.fs_bank_plugin.update_object("/index.json", - {"key": "value"}) - value = self.fs_bank_plugin.get_object( - "/index.json") - self.assertEqual({"key": "value"}, value) diff --git a/karbor/tests/unit/protection/test_glance_protection_plugin.py b/karbor/tests/unit/protection/test_glance_protection_plugin.py deleted file mode 100644 index 1e7813bd..00000000 --- a/karbor/tests/unit/protection/test_glance_protection_plugin.py +++ /dev/null @@ -1,246 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file 
except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -from unittest import mock - -from keystoneauth1 import session as keystone_session -from oslo_config import cfg -from oslo_config import fixture - -from karbor.common import constants -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection import client_factory -from karbor.services.protection.protection_plugins. \ - image.image_protection_plugin import GlanceProtectionPlugin -from karbor.services.protection.protection_plugins.image \ - import image_plugin_schemas -from karbor.tests import base - - -class FakeBankPlugin(BankPlugin): - def update_object(self, key, value, context=None): - return - - def get_object(self, key, context=None): - return - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - return - - def delete_object(self, key, context=None): - return - - def get_owner_id(self, context=None): - return - - -fake_bank = Bank(FakeBankPlugin()) -fake_bank_section = BankSection(bank=fake_bank, section="fake") - -ResourceNode = collections.namedtuple( - "ResourceNode", - ["value", - "child_nodes"] -) - - -Image = collections.namedtuple( - "Image", - ["disk_format", - "container_format", - "status"] -) - -Server = collections.namedtuple( - "Server", - ["status"] -) - - -def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs): - def noop(*args, **kwargs): - pass - - hooks = ( - 'on_prepare_begin', - 'on_prepare_finish', - 'on_main', - 'on_complete', - ) - for hook_name in hooks: - hook = getattr(operation, hook_name, noop) - hook(checkpoint, resource, context, parameters, **kwargs) - - -class FakeCheckpoint(object): - def __init__(self): - super(FakeCheckpoint, self).__init__() - self.bank_section = fake_bank_section - - def get_resource_bank_section(self, resource_id): - return self.bank_section - - -class GlanceProtectionPluginTest(base.TestCase): - def setUp(self): - super(GlanceProtectionPluginTest, self).setUp() - - plugin_config = cfg.ConfigOpts() - plugin_config_fixture = self.useFixture(fixture.Config(plugin_config)) - plugin_config_fixture.load_raw_values( - group='image_backup_plugin', - poll_interval=0, - ) - plugin_config_fixture.load_raw_values( - group='image_backup_plugin', - backup_image_object_size=65536, - ) - plugin_config_fixture.load_raw_values( - group='image_backup_plugin', - enable_server_snapshot=True, - ) - self.plugin = GlanceProtectionPlugin(plugin_config) - cfg.CONF.set_default('glance_endpoint', - 'http://127.0.0.1:9292', - 'glance_client') - - self.cntxt = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh') - self.glance_client = client_factory.ClientFactory.create_client( - "glance", self.cntxt) - self.checkpoint = FakeCheckpoint() - - def test_get_options_schema(self): - options_schema = self.plugin.get_options_schema( - constants.IMAGE_RESOURCE_TYPE) - 
self.assertEqual(options_schema, image_plugin_schemas.OPTIONS_SCHEMA) - - def test_get_restore_schema(self): - options_schema = self.plugin.get_restore_schema( - constants.IMAGE_RESOURCE_TYPE) - self.assertEqual(options_schema, image_plugin_schemas.RESTORE_SCHEMA) - - def test_get_saved_info_schema(self): - options_schema = self.plugin.get_saved_info_schema( - constants.IMAGE_RESOURCE_TYPE) - self.assertEqual(options_schema, - image_plugin_schemas.SAVED_INFO_SCHEMA) - - @mock.patch('karbor.services.protection.protection_plugins.image.' - 'image_protection_plugin.utils.status_poll') - @mock.patch('karbor.services.protection.clients.glance.create') - def test_create_backup(self, mock_glance_create, mock_status_poll): - resource = Resource(id="123", - type=constants.IMAGE_RESOURCE_TYPE, - name='fake') - - fake_bank_section.update_object = mock.MagicMock() - - protect_operation = self.plugin.get_protect_operation(resource) - mock_glance_create.return_value = self.glance_client - - self.glance_client.images.get = mock.MagicMock() - self.glance_client.images.get.return_value = Image( - disk_format="", - container_format="", - status="active" - ) - - fake_bank_section.update_object = mock.MagicMock() - self.glance_client.images.data = mock.MagicMock() - self.glance_client.images.data.return_value = [] - mock_status_poll.return_value = True - call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.client_factory.ClientFactory.' - '_generate_session') - @mock.patch('karbor.services.protection.protection_plugins.image.' - 'image_protection_plugin.utils.status_poll') - @mock.patch('karbor.services.protection.clients.nova.create') - @mock.patch('karbor.services.protection.clients.glance.create') - def test_create_backup_with_server_id_in_extra_info( - self, mock_glance_create, mock_nova_create, mock_status_poll, - mock_generate_session): - cfg.CONF.set_default('nova_endpoint', - 'http://127.0.0.1:8774/v2.1', - 'nova_client') - self.nova_client = client_factory.ClientFactory.create_client( - "nova", self.cntxt) - mock_generate_session.return_value = keystone_session.Session( - auth=None) - resource = Resource(id="123", - type=constants.IMAGE_RESOURCE_TYPE, - name='fake', - extra_info={'server_id': 'fake_server_id'}) - - protect_operation = self.plugin.get_protect_operation(resource) - mock_glance_create.return_value = self.glance_client - self.glance_client.images.get = mock.MagicMock() - self.glance_client.images.get.return_value = Image( - disk_format="", - container_format="", - status="active" - ) - - mock_nova_create.return_value = self.nova_client - self.nova_client.servers.get = mock.MagicMock() - self.nova_client.servers.get.return_value = Server( - status='ACTIVE') - self.nova_client.servers.image_create = mock.MagicMock() - self.nova_client.servers.image_create.return_value = '345' - fake_bank_section.update_object = mock.MagicMock() - self.glance_client.images.data = mock.MagicMock() - self.glance_client.images.data.return_value = [] - - mock_status_poll.return_value = True - call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, - {}) - - def test_delete_backup(self): - resource = Resource(id="123", - type=constants.IMAGE_RESOURCE_TYPE, - name='fake') - - fake_bank_section.list_objects = mock.MagicMock() - fake_bank_section.list_objects.return_value = ["data_1", "data_2"] - fake_bank_section.delete_object = mock.MagicMock() - delete_operation = self.plugin.get_delete_operation(resource) - call_hooks(delete_operation,
self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.protection_plugins.utils.' - 'update_resource_verify_result') - def test_verify_backup(self, mock_update_verify): - resource = Resource(id="123", - type=constants.IMAGE_RESOURCE_TYPE, - name='fake') - - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = 'available' - - verify_operation = self.plugin.get_verify_operation(resource) - call_hooks(verify_operation, self.checkpoint, resource, self.cntxt, - {}) - mock_update_verify.assert_called_with( - None, resource.type, resource.id, 'available') - - def test_get_supported_resources_types(self): - types = self.plugin.get_supported_resources_types() - self.assertEqual([constants.IMAGE_RESOURCE_TYPE], types) diff --git a/karbor/tests/unit/protection/test_graph.py b/karbor/tests/unit/protection/test_graph.py deleted file mode 100644 index d343b413..00000000 --- a/karbor/tests/unit/protection/test_graph.py +++ /dev/null @@ -1,300 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from collections import namedtuple -from oslo_serialization import jsonutils -from oslo_serialization import msgpackutils -from karbor import exception -from karbor import resource -import karbor.services.protection.graph as graph -from karbor.tests import base - - -class GraphBuilderTest(base.TestCase): - def test_source_set(self): - """Test that the source set only contains sources""" - - test_matrix = ( - ({ - "A": ["B"], - "B": ["C"], - "C": [], - }, {"A"}), - ({ - "A": [], - "B": ["C"], - "C": [], - }, {"A", "B"}), - ({ - "A": ["C"], - "B": ["C"], - "C": [], - }, {"A", "B"}), - ) - - for g, expected_result in test_matrix: - result = graph.build_graph(g.keys(), g.__getitem__) - self.assertEqual(expected_result, {node.value for node in result}) - - def test_detect_cyclic_graph(self): - """Test that cyclic graphs are detected""" - - test_matrix = ( - ({ - "A": ["B"], - "B": ["C"], - "C": [], - }, False), - ({ - "A": [], - "B": ["C"], - "C": [], - }, False), - ({ - "A": ["C"], - "B": ["C"], - "C": ["A"], - }, True), - ({ - "A": ["B"], - "B": ["C"], - "C": ["A"], - }, True), - ) - - for g, expected_result in test_matrix: - if expected_result: - self.assertRaises( - graph.FoundLoopError, - graph.build_graph, - g.keys(), g.__getitem__, - ) - else: - graph.build_graph(g.keys(), g.__getitem__) - - def test_diamond_graph(self): - def test_node_children(testnode): - return testnode.children - - TestNode = namedtuple('TestNode', ['id', 'children']) -# A -# / \ -# B C -# \ / -# D - test_diamond_left = TestNode('D', ()) - test_diamond_right = TestNode('D', ()) - test_left = TestNode('B', (test_diamond_left, )) - test_right = TestNode('C', (test_diamond_right, )) - test_root = TestNode('A', (test_left, test_right, )) - test_nodes = {test_root, } - result_graph = graph.build_graph(test_nodes, test_node_children) - test_root_node = result_graph[0] -
self.assertEqual(2, len(test_root_node.child_nodes)) - test_left_node = test_root_node.child_nodes[0] - test_right_node = test_root_node.child_nodes[1] - - self.assertEqual(id(test_left_node.child_nodes[0]), - id(test_right_node.child_nodes[0])) - - def test_graph_pack_unpack(self): - test_base = { - "A1": ["B1", "B2"], - "B1": ["C1", "C2"], - "B2": ["C3", "C2"], - "C1": [], - "C2": [], - "C3": [], - } - - test_graph = graph.build_graph(test_base.keys(), test_base.__getitem__) - packed_graph = graph.pack_graph(test_graph) - unpacked_graph = graph.unpack_graph(packed_graph) - self.assertEqual(test_graph, unpacked_graph) - - def test_graph_serialize_deserialize(self): - Format = namedtuple('Format', ['loads', 'dumps']) - formats = [ - Format(jsonutils.loads, jsonutils.dumps), - Format(msgpackutils.loads, msgpackutils.dumps), - ] - test_base = { - "A1": ["B1", "B2"], - "B1": ["C1", "C2"], - "B2": ["C3", "C2"], - "C1": [], - "C2": [], - "C3": [], - } - - test_graph = graph.build_graph(test_base.keys(), test_base.__getitem__) - for fmt in formats: - serialized = fmt.dumps(graph.pack_graph(test_graph)) - unserialized = graph.unpack_graph(fmt.loads(serialized)) - self.assertEqual(test_graph, unserialized) - - def test_graph_serialize(self): - resource_a = resource.Resource('server', 0, 'a', {'name': 'a'}) - resource_b = resource.Resource('volume', 1, 'b', {'name': 'b'}) - test_base = { - resource_a: [resource_b], - resource_b: [] - } - test_graph = graph.build_graph(test_base.keys(), test_base.__getitem__) - self.assertIn( - graph.serialize_resource_graph(test_graph), - [ - '[{"0x1": ["server", 0, "a", {"name": "a"}], ' - '"0x0": ["volume", 1, "b", {"name": "b"}]}, ' - '[["0x1", ["0x0"]]]]', - '[{"0x0": ["volume", 1, "b", {"name": "b"}], ' - '"0x1": ["server", 0, "a", {"name": "a"}]}, ' - '[["0x1", ["0x0"]]]]' - ]) - - def test_graph_deserialize_unordered_adjacency(self): - test_base = { - "A1": ["B1", "B2"], - "B1": ["C1", "C2"], - "B2": ["C3", "C2"], - "C1": [], - "C2": [], - "C3": [], - } - test_graph = graph.build_graph(test_base.keys(), test_base.__getitem__) - packed_graph = graph.pack_graph(test_graph) - reversed_adjacency = tuple(reversed(packed_graph.adjacency)) - packed_graph = graph.PackedGraph(packed_graph.nodes, - reversed_adjacency) - with self.assertRaisesRegex(exception.InvalidInput, "adjacency list"): - graph.unpack_graph(packed_graph) - - def test_pack_unpack_graph_with_isolated_node(self): - test_base = { - "A1": ["B1", "B2"], - "B1": ["C1", "C2"], - "B2": ["C3", "C2"], - "C1": [], - "C2": [], - "C3": [], - "C4": [] - } - - test_graph = graph.build_graph(test_base.keys(), test_base.__getitem__) - packed_graph = graph.pack_graph(test_graph) - unpacked_graph = graph.unpack_graph(packed_graph) - self.assertEqual(len(test_graph), len(unpacked_graph)) - for start_node in test_graph: - self.assertIn(start_node, unpacked_graph) - - def test_pack_unpack_graph(self): - test_base = { - "A1": ["B1", "B2", "B3", "B4"], - "B1": [], - "B2": [], - "B3": ["B1"], - "B4": ["B2"], - } - - test_graph = graph.build_graph(test_base.keys(), test_base.__getitem__) - packed_graph = graph.pack_graph(test_graph) - unpacked_graph = graph.unpack_graph(packed_graph) - self.assertEqual(len(test_graph), len(unpacked_graph)) - for start_node in test_graph: - self.assertIn(start_node, unpacked_graph) - - -class _TestGraphWalkerListener(graph.GraphWalkerListener): - def __init__(self, expected_event_stream, test): - super(_TestGraphWalkerListener, self).__init__() - # Because the testing framework is badly 
designed - # I need to have a reference to the test to raise assertions - self._test = test - self._expected_expected_event_stream = list(expected_event_stream) - - def on_node_enter(self, node, already_visited): - self._test.assertEqual( - self._expected_expected_event_stream.pop(0), - ("on_node_enter", node.value, already_visited), - ) - - def on_node_exit(self, node): - self._test.assertEqual( - self._expected_expected_event_stream.pop(0), - ("on_node_exit", node.value), - ) - - -class GraphWalkerTest(base.TestCase): - def test_graph_walker(self): - test_matrix = ( - ({ - 'A': ['B'], - 'B': ['C'], - 'C': [], - }, ( - ("on_node_enter", 'A', False), - ("on_node_enter", 'B', False), - ("on_node_enter", 'C', False), - ("on_node_exit", 'C'), - ("on_node_exit", 'B'), - ("on_node_exit", 'A'), - )), - ({ - 'A': ['C'], - 'B': ['C'], - 'C': [], - }, ( - ("on_node_enter", 'A', False), - ("on_node_enter", 'C', False), - ("on_node_exit", 'C'), - ("on_node_exit", 'A'), - ("on_node_enter", 'B', False), - ("on_node_enter", 'C', True), - ("on_node_exit", 'C'), - ("on_node_exit", 'B'), - )), - ({ - 'A': ['C'], - 'B': ['C'], - 'C': ['D', 'E'], - 'D': [], - 'E': [], - }, ( - ("on_node_enter", 'A', False), - ("on_node_enter", 'C', False), - ("on_node_enter", 'D', False), - ("on_node_exit", 'D'), - ("on_node_enter", 'E', False), - ("on_node_exit", 'E'), - ("on_node_exit", 'C'), - ("on_node_exit", 'A'), - ("on_node_enter", 'B', False), - ("on_node_enter", 'C', True), - ("on_node_enter", 'D', True), - ("on_node_exit", 'D'), - ("on_node_enter", 'E', True), - ("on_node_exit", 'E'), - ("on_node_exit", 'C'), - ("on_node_exit", 'B'), - )), - ) - - for g, expected_calls in test_matrix: - listener = _TestGraphWalkerListener(expected_calls, self) - walker = graph.GraphWalker() - walker.register_listener(listener) - keys = list(g.keys()) - keys.sort() - walker.walk_graph(graph.build_graph(keys, g.__getitem__)) diff --git a/karbor/tests/unit/protection/test_manager.py b/karbor/tests/unit/protection/test_manager.py deleted file mode 100644 index ed7d13bc..00000000 --- a/karbor/tests/unit/protection/test_manager.py +++ /dev/null @@ -1,288 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
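The GraphWalkerTest expectations above encode a depth-first contract: on_node_enter fires before a node's children (with a flag telling whether the node has been seen before), on_node_exit fires only after the whole subtree has been walked, and children shared through a diamond are re-entered with already_visited=True rather than skipped. A walker honoring that contract can be as small as the sketch below; it is an illustration of the contract the tests check, not karbor's GraphWalker implementation:

    def walk_graph(start_nodes, listener, visited=None):
        # Depth-first traversal; shared subtrees are revisited but
        # flagged as already seen, matching the event streams above.
        if visited is None:
            visited = set()
        for node in start_nodes:
            listener.on_node_enter(node, node in visited)
            visited.add(node)
            walk_graph(node.child_nodes, listener, visited)
            listener.on_node_exit(node)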
- -from unittest import mock - -from oslo_config import cfg -import oslo_messaging - -from karbor import exception -from karbor.resource import Resource -from karbor.services.protection.flows import utils -from karbor.services.protection.flows import worker as flow_manager -from karbor.services.protection import manager -from karbor.services.protection import protectable_registry -from karbor.services.protection import provider - -from karbor.tests import base -from karbor.tests.unit.protection import fakes - -CONF = cfg.CONF -CONF.import_opt('trigger_poll_interval', 'karbor.services.operationengine' - '.engine.triggers.timetrigger') - - -class ProtectionServiceTest(base.TestCase): - def setUp(self): - self.load_engine = flow_manager.Worker._load_engine - flow_manager.Worker._load_engine = mock.Mock() - flow_manager.Worker._load_engine.return_value = fakes.FakeFlowEngine() - super(ProtectionServiceTest, self).setUp() - self.pro_manager = manager.ProtectionManager() - self.protection_plan = fakes.fake_protection_plan() - - @mock.patch.object(protectable_registry.ProtectableRegistry, - 'list_resource_types') - def test_list_protectable_types(self, mocker): - expected = ["OS::Nova::Server", - "OS::Cinder::Volume"] - mocker.return_value = expected - result = self.pro_manager.list_protectable_types(None) - self.assertEqual(expected, result) - - def test_show_protectable_type(self): - def mock_plugins(self): - self._plugin_map = { - "OS::Nova::Server": server_plugin, - "OS::Cinder::Volume": volume_plugin - } - - server_plugin = fakes.FakeProtectablePlugin() - server_plugin.get_resource_type = mock.MagicMock( - return_value="OS::Nova::Server") - volume_plugin = fakes.FakeProtectablePlugin() - volume_plugin.get_parent_resource_types = mock.MagicMock( - return_value=["OS::Nova::Server"]) - - protectable_registry.ProtectableRegistry.load_plugins = mock_plugins - - result = self.pro_manager.show_protectable_type(None, - "OS::Nova::Server") - self.assertEqual("OS::Nova::Server", result["name"]) - self.assertEqual({"OS::Cinder::Volume", "OS::Glance::Image"}, - set(result["dependent_types"])) - - @mock.patch.object(protectable_registry.ProtectableRegistry, - 'show_resource') - def test_show_protectable_instance(self, mocker): - mocker.return_value = Resource(type='OS::Nova::Server', - id='123456', - name='name123') - fake_cntx = mock.MagicMock() - - result = self.pro_manager.show_protectable_instance( - fake_cntx, 'OS::Nova::Server', '123456') - self.assertEqual( - {'id': '123456', 'name': 'name123', 'type': 'OS::Nova::Server', - 'extra_info': None}, - result) - - @mock.patch.object(protectable_registry.ProtectableRegistry, - 'show_resource') - def test_show_protectable_instance_with_nonexist_id(self, mocker): - mocker.return_value = None - fake_cntx = mock.MagicMock() - - result = self.pro_manager.show_protectable_instance( - fake_cntx, 'OS::Nova::Server', '123456') - self.assertEqual(None, result) - - @mock.patch.object(protectable_registry.ProtectableRegistry, - 'list_resources') - def test_list_protectable_instances(self, mocker): - mocker.return_value = [Resource(type='OS::Nova::Server', - id='123456', - name='name123'), - Resource(type='OS::Nova::Server', - id='654321', - name='name654')] - fake_cntx = mock.MagicMock() - - result = self.pro_manager.list_protectable_instances( - fake_cntx, 'OS::Nova::Server') - self.assertEqual([{'id': '123456', 'name': 'name123', - 'extra_info': None}, - {'id': '654321', 'name': 'name654', - 'extra_info': None}], - result) - - 
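Several of the assertions that follow expect oslo_messaging.ExpectedException rather than the karbor exception that actually fails the call. That is the standard oslo.messaging pattern: an RPC endpoint method wrapped with messaging.expected_exceptions re-raises the listed exception types inside ExpectedException, so the dispatcher can return them to the remote caller without logging a traceback. A minimal sketch of the pattern, with a hypothetical manager method standing in for karbor's real endpoints:

    import oslo_messaging as messaging

    class FakeManager(object):
        @messaging.expected_exceptions(KeyError)
        def protect(self, context, plan):
            # A listed exception escapes wrapped in ExpectedException.
            raise KeyError(plan)

Calling FakeManager().protect(None, 'plan1') raises messaging.ExpectedException, which is why assertRaises in these tests names that type instead of the underlying error.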
@mock.patch.object(protectable_registry.ProtectableRegistry, - 'fetch_dependent_resources') - def test_list_protectable_dependents(self, mocker): - mocker.return_value = [Resource(type='OS::Cinder::Volume', - id='123456', name='name123'), - Resource(type='OS::Cinder::Volume', - id='654321', name='name654')] - fake_cntx = mock.MagicMock() - - result = self.pro_manager.list_protectable_dependents( - fake_cntx, 'fake_id', 'OS::Nova::Server', "") - self.assertEqual([{'type': 'OS::Cinder::Volume', 'id': '123456', - 'name': 'name123', 'extra_info': None}, - {'type': 'OS::Cinder::Volume', 'id': '654321', - 'name': 'name654', 'extra_info': None}], - result) - - @mock.patch.object(utils, 'update_operation_log') - @mock.patch.object(utils, 'create_operation_log') - @mock.patch.object(provider.ProviderRegistry, 'show_provider') - def test_protect(self, mock_provider, mock_operation_log_create, - mock_operation_log_update): - mock_provider.return_value = fakes.FakeProvider() - self.pro_manager.protect(None, fakes.fake_protection_plan()) - - @mock.patch.object(flow_manager.Worker, 'get_flow') - def test_protect_in_error(self, mock_flow): - mock_flow.side_effect = Exception() - self.assertRaises(oslo_messaging.ExpectedException, - self.pro_manager.protect, - None, - fakes.fake_protection_plan()) - - @mock.patch.object(provider.ProviderRegistry, 'show_provider') - def test_restore_with_project_id_not_same(self, mock_provider): - mock_provider.return_value = fakes.FakeProvider() - context = mock.MagicMock(project_id='fake_project_id_1', - is_admin=False) - fake_restore = { - 'checkpoint_id': 'fake_checkpoint', - 'provider_id': 'fake_provider_id', - 'parameters': None - } - self.assertRaises( - oslo_messaging.ExpectedException, self.pro_manager.restore, - context, fake_restore, None) - - @mock.patch.object(provider.ProviderRegistry, 'show_provider') - def test_list_checkpoints(self, mock_provider): - fake_provider = fakes.FakeProvider() - fake_provider.list_checkpoints = mock.MagicMock() - mock_provider.return_value = fake_provider - context = mock.MagicMock(project_id='fake_project_id') - self.pro_manager.list_checkpoints(context, 'provider1', filters={}, - all_tenants=False) - fake_provider.list_checkpoints.assert_called_once_with( - 'fake_project_id', 'provider1', limit=None, marker=None, - plan_id=None, start_date=None, end_date=None, - sort_dir=None, context=context, all_tenants=False) - - @mock.patch.object(provider.ProviderRegistry, 'show_provider') - def test_list_checkpoints_with_all_tenants(self, mock_provider): - fake_provider = fakes.FakeProvider() - fake_provider.list_checkpoints = mock.MagicMock() - mock_provider.return_value = fake_provider - context = mock.MagicMock(project_id='fake_project_id') - self.pro_manager.list_checkpoints(context, 'provider1', filters={}, - all_tenants=True) - fake_provider.list_checkpoints.assert_called_once_with( - 'fake_project_id', 'provider1', limit=None, marker=None, - plan_id=None, start_date=None, end_date=None, - sort_dir=None, context=context, all_tenants=True) - - @mock.patch.object(provider.ProviderRegistry, 'show_provider') - def test_list_checkpoints_with_all_tenants_and_filter_by_project_id( - self, mock_provider): - fake_provider = fakes.FakeProvider() - fake_provider.list_checkpoints = mock.MagicMock() - mock_provider.return_value = fake_provider - context = mock.MagicMock(project_id='fake_project_id') - self.pro_manager.list_checkpoints(context, 'provider1', filters={ - 'project_id': 'fake_project_id1'}, all_tenants=True) - 
fake_provider.list_checkpoints.assert_called_once_with( - 'fake_project_id1', 'provider1', limit=None, marker=None, - plan_id=None, start_date=None, end_date=None, - sort_dir=None, context=context, all_tenants=False) - - @mock.patch.object(provider.ProviderRegistry, 'show_provider') - def test_show_checkpoint(self, mock_provider): - mock_provider.return_value = fakes.FakeProvider() - context = mock.MagicMock(project_id='fake_project_id') - cp = self.pro_manager.show_checkpoint(context, 'provider1', - 'fake_checkpoint') - self.assertEqual('fake_checkpoint', cp['id']) - - @mock.patch.object(provider.ProviderRegistry, 'show_provider') - def test_show_checkpoint_not_allowed(self, mock_provider): - mock_provider.return_value = fakes.FakeProvider() - context = mock.MagicMock( - project_id='fake_project_id_1', - is_admin=False - ) - self.assertRaises(oslo_messaging.ExpectedException, - self.pro_manager.show_checkpoint, - context, 'provider1', 'fake_checkpoint') - - @mock.patch.object(provider.ProviderRegistry, 'show_provider') - @mock.patch.object(fakes.FakeCheckpointCollection, 'get') - def test_show_checkpoint_not_found(self, mock_cp_collection_get, - mock_provider): - mock_provider.return_value = fakes.FakeProvider() - context = mock.MagicMock() - mock_cp_collection_get.side_effect = exception.CheckpointNotFound( - checkpoint_id='123') - self.assertRaises(oslo_messaging.ExpectedException, - self.pro_manager.show_checkpoint, - context, - 'provider1', - 'non_existent_checkpoint') - - @mock.patch.object(provider.ProviderRegistry, 'show_provider') - def test_checkpoint_state_reset(self, mock_provider): - fake_provider = fakes.FakeProvider() - fake_checkpoint = fakes.FakeCheckpoint() - fake_checkpoint.commit = mock.MagicMock() - fake_provider.get_checkpoint = mock.MagicMock( - return_value=fake_checkpoint) - mock_provider.return_value = fake_provider - context = mock.MagicMock(project_id='fake_project_id', is_admin=True) - self.pro_manager.reset_state(context, 'provider1', 'fake_checkpoint', - 'error') - self.assertEqual(fake_checkpoint.status, 'error') - self.assertEqual(True, fake_checkpoint.commit.called) - - @mock.patch.object(provider.ProviderRegistry, 'show_provider') - def test_checkpoint_state_reset_with_access_not_allowed( - self, mock_provider): - fake_provider = fakes.FakeProvider() - fake_checkpoint = fakes.FakeCheckpoint() - fake_provider.get_checkpoint = mock.MagicMock( - return_value=fake_checkpoint) - mock_provider.return_value = fake_provider - context = mock.MagicMock(project_id='fake_project_id_01', - is_admin=False) - self.assertRaises(oslo_messaging.ExpectedException, - self.pro_manager.reset_state, - context, - 'fake_project_id', - 'fake_checkpoint_id', - 'error') - - @mock.patch.object(provider.ProviderRegistry, 'show_provider') - def test_checkpoint_state_reset_with_wrong_checkpoint_state( - self, mock_provider): - fake_provider = fakes.FakeProvider() - fake_checkpoint = fakes.FakeCheckpoint() - fake_checkpoint.status = 'deleting' - fake_provider.get_checkpoint = mock.MagicMock( - return_value=fake_checkpoint) - mock_provider.return_value = fake_provider - context = mock.MagicMock(project_id='fake_project_id', is_admin=True) - self.assertRaises(oslo_messaging.ExpectedException, - self.pro_manager.reset_state, - context, - 'fake_project_id', - 'fake_checkpoint_id', - 'error') - - def tearDown(self): - flow_manager.Worker._load_engine = self.load_engine - super(ProtectionServiceTest, self).tearDown() diff --git a/karbor/tests/unit/protection/test_manila_protection_plugin.py 
b/karbor/tests/unit/protection/test_manila_protection_plugin.py deleted file mode 100644 index 3e246693..00000000 --- a/karbor/tests/unit/protection/test_manila_protection_plugin.py +++ /dev/null @@ -1,223 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -from unittest import mock - -from oslo_config import cfg -from oslo_config import fixture - -from karbor.common import constants -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection import client_factory -from karbor.services.protection.protection_plugins. \ - share.share_snapshot_plugin import ManilaSnapshotProtectionPlugin -from karbor.services.protection.protection_plugins.share \ - import share_snapshot_plugin_schemas -from karbor.tests import base - - -class FakeBankPlugin(BankPlugin): - def update_object(self, key, value, context=None): - return - - def get_object(self, key, context=None): - return - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - return - - def delete_object(self, key, context=None): - return - - def get_owner_id(self, context=None): - return - - -fake_bank = Bank(FakeBankPlugin()) -fake_bank_section = BankSection(bank=fake_bank, section="fake") - -ResourceNode = collections.namedtuple( - "ResourceNode", - ["value", - "child_nodes"] -) - -Share = collections.namedtuple( - "Share", - ["status"] -) - -Snapshot = collections.namedtuple( - "Snapshot", - ["id", "status"] -) - - -def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs): - def noop(*args, **kwargs): - pass - - hooks = ( - 'on_prepare_begin', - 'on_prepare_finish', - 'on_main', - 'on_complete', - ) - for hook_name in hooks: - hook = getattr(operation, hook_name, noop) - hook(checkpoint, resource, context, parameters, **kwargs) - - -class FakeCheckpoint(object): - def __init__(self): - self.bank_section = fake_bank_section - - def get_resource_bank_section(self, resource_id): - return self.bank_section - - -class ManilaProtectionPluginTest(base.TestCase): - def setUp(self): - super(ManilaProtectionPluginTest, self).setUp() - - plugin_config = cfg.ConfigOpts() - plugin_config_fixture = self.useFixture(fixture.Config(plugin_config)) - plugin_config_fixture.load_raw_values( - group='manila_snapshot_plugin', - poll_interval=0, - ) - - self.plugin = ManilaSnapshotProtectionPlugin(plugin_config) - - cfg.CONF.set_default('manila_endpoint', - 'http://127.0.0.1:8774/v2.1', - 'manila_client') - service_catalog = [ - {'type': 'sharev2', - 'endpoints': [{'publicURL': 'http://127.0.0.1:8774/v2.1/abcd'}], - }, - ] - self.cntxt = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh', - service_catalog=service_catalog) - self.manila_client = client_factory.ClientFactory.create_client( - "manila", self.cntxt) - 
self.checkpoint = FakeCheckpoint() - - def test_get_options_schema(self): - options_schema = self.plugin.get_options_schema( - constants.SHARE_RESOURCE_TYPE) - self.assertEqual(options_schema, - share_snapshot_plugin_schemas.OPTIONS_SCHEMA) - - def test_get_restore_schema(self): - options_schema = self.plugin.get_restore_schema( - constants.SHARE_RESOURCE_TYPE) - self.assertEqual(options_schema, - share_snapshot_plugin_schemas.RESTORE_SCHEMA) - - def test_get_saved_info_schema(self): - options_schema = self.plugin.get_saved_info_schema( - constants.SHARE_RESOURCE_TYPE) - self.assertEqual(options_schema, - share_snapshot_plugin_schemas.SAVED_INFO_SCHEMA) - - @mock.patch('karbor.services.protection.protection_plugins.share.' - 'share_snapshot_plugin.utils.status_poll') - @mock.patch('karbor.services.protection.clients.manila.create') - def test_create_snapshot(self, mock_manila_create, mock_status_poll): - resource = Resource(id="123", - type=constants.SHARE_RESOURCE_TYPE, - name='fake') - - fake_bank_section.update_object = mock.MagicMock() - - protect_operation = self.plugin.get_protect_operation(resource) - mock_manila_create.return_value = self.manila_client - - self.manila_client.shares.get = mock.MagicMock() - self.manila_client.shares.return_value = Share( - status="available" - ) - fake_bank_section.update_object = mock.MagicMock() - self.manila_client.share_snapshots.create = mock.MagicMock() - self.manila_client.share_snapshots.create.return_value = Snapshot( - id="1234", - status="available" - ) - self.manila_client.share_snapshots.get = mock.MagicMock() - self.manila_client.share_snapshots.get.return_value = Snapshot( - id="1234", - status="available" - ) - mock_status_poll.return_value = True - call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.protection_plugins.share.' - 'share_snapshot_plugin.utils.status_poll') - @mock.patch('karbor.services.protection.clients.manila.create') - def test_delete_snapshot(self, mock_manila_create, mock_status_poll): - resource = Resource(id="123", - type=constants.SHARE_RESOURCE_TYPE, - name='fake') - mock_manila_create.return_value = self.manila_client - self.manila_client.share_snapshots.get = mock.MagicMock() - self.manila_client.share_snapshots.get.return_value = Snapshot( - id="1234", - status="available" - ) - self.manila_client.share_snapshots.delete = mock.MagicMock() - - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = { - "snapshot_id": "1234"} - - mock_status_poll.return_value = True - delete_operation = self.plugin.get_delete_operation(resource) - call_hooks(delete_operation, self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.protection_plugins.utils.' 
- 'update_resource_verify_result') - @mock.patch('karbor.services.protection.clients.manila.create') - def test_verify_snapshot(self, mock_manila_create, mock_update_verify): - resource = Resource(id="123", - type=constants.SHARE_RESOURCE_TYPE, - name='fake') - mock_manila_create.return_value = self.manila_client - self.manila_client.share_snapshots.get = mock.MagicMock() - self.manila_client.share_snapshots.get.return_value = Snapshot( - id="1234", - status="available" - ) - - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = { - "snapshot_id": "1234"} - - verify_operation = self.plugin.get_verify_operation(resource) - call_hooks(verify_operation, self.checkpoint, resource, self.cntxt, - {}) - mock_update_verify.assert_called_with( - None, resource.type, resource.id, 'available') - - def test_get_supported_resources_types(self): - types = self.plugin.get_supported_resources_types() - self.assertEqual([constants.SHARE_RESOURCE_TYPE], types) diff --git a/karbor/tests/unit/protection/test_neutron_protection_plugin.py b/karbor/tests/unit/protection/test_neutron_protection_plugin.py deleted file mode 100644 index 3f3a97a4..00000000 --- a/karbor/tests/unit/protection/test_neutron_protection_plugin.py +++ /dev/null @@ -1,409 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -from unittest import mock - -from oslo_config import cfg - -from karbor.common import constants -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection import client_factory -from karbor.services.protection.protection_plugins.network \ - import network_plugin_schemas -from karbor.services.protection.protection_plugins.network. 
\ - neutron_protection_plugin import NeutronProtectionPlugin -from karbor.tests import base - -FakeNetworks = {'networks': [ - {u'status': u'ACTIVE', - u'router:external': False, - u'availability_zone_hints': [], - u'availability_zones': [u'nova'], - u'ipv4_address_scope': None, - u'description': u'', - u'provider:physical_network': None, - u'subnets': [u'129f1bc5-4282-4f9d-ae60-4db1e1cac22d', - u'0b42e051-5a33-4ac4-9a4f-691d0891d760'], - u'updated_at': u'2016-04-23T05:07:06', - u'tenant_id': u'f6f6d0b2591f41acb8257656d70029fc', - u'created_at': u'2016-04-23T05:07:06', - u'tags': [], - u'ipv6_address_scope': None, - u'provider:segmentation_id': 1057, - u'provider:network_type': u'vxlan', - u'port_security_enabled': True, - u'admin_state_up': True, - u'shared': False, - u'mtu': 1450, - u'id': u'9b68fb64-39d4-4d41-8cc9-f27846c6e5f5', - u'name': u'private'}, - - {u'provider:physical_network': None, - u'ipv6_address_scope': None, - u'port_security_enabled': True, - u'provider:network_type': u'local', - u'id': u'49ef013d-9bb2-4b8f-9eea-e45563efc420', - u'router:external': True, - u'availability_zone_hints': [], - u'availability_zones': [u'nova'], - u'ipv4_address_scope': None, - u'shared': False, - u'status': u'ACTIVE', - u'subnets': [u'808c3b3f-3d79-4c5b-a5b6-95dd07abeb2d'], - u'description': u'', - u'tags': [], - u'updated_at': u'2016-04-25T07:14:53', - u'is_default': False, - u'provider:segmentation_id': None, - u'name': u'ext_net', - u'admin_state_up': True, - u'tenant_id': u'f6f6d0b2591f41acb8257656d70029fc', - u'created_at': u'2016-04-25T07:14:53', - u'mtu': 1500} -]} - -FakeSubnets = {'subnets': [ - {u'description': u'', - u'enable_dhcp': True, - u'network_id': u'49ef013d-9bb2-4b8f-9eea-e45563efc420', - u'tenant_id': u'f6f6d0b2591f41acb8257656d70029fc', - u'created_at': u'2016-04-25T07:15:25', - u'dns_nameservers': [], - u'updated_at': u'2016-04-25T07:15:25', - u'ipv6_ra_mode': None, - u'allocation_pools': [{u'start': u'192.168.21.2', - u'end': u'192.168.21.254'}], - u'gateway_ip': u'192.168.21.1', - u'ipv6_address_mode': None, - u'ip_version': 4, - u'host_routes': [], - u'cidr': u'192.168.21.0/24', - u'id': u'808c3b3f-3d79-4c5b-a5b6-95dd07abeb2d', - u'subnetpool_id': None, - u'name': u'ext_subnet'}, -]} - -FakePorts = {'ports': [ - {u'allowed_address_pairs': [], - u'extra_dhcp_opts': [], - u'updated_at': u'2016-04-25T07:15:59', - u'device_owner': - u'network:router_gateway', - u'port_security_enabled': False, - u'binding:profile': {}, - u'fixed_ips': [{u'subnet_id': u'808c3b3f-3d79-4c5b-a5b6-95dd07abeb2d', - u'ip_address': u'192.168.21.3'}], - u'id': u'2b34c97a-4ccc-44c0-bc50-b7bbfc3508eb', - u'security_groups': [], - u'binding:vif_details': {}, - u'binding:vif_type': u'unbound', - u'mac_address': u'fa:16:3e:00:47:f2', - u'status': u'DOWN', - u'binding:host_id': u'', - u'description': u'', - u'device_id': u'7fc86d4b-4c0e-4ed8-8d39-e27b7c1b7ae8', - u'name': u'', - u'admin_state_up': True, - u'network_id': u'49ef013d-9bb2-4b8f-9eea-e45563efc420', - u'dns_name': None, - u'created_at': u'2016-04-25T07:15:59', - u'binding:vnic_type': u'normal', - u'tenant_id': u''}, -]} - -FakeRoutes = {'routers': [ - {u'status': u'ACTIVE', - u'external_gateway_info': - {u'network_id': u'49ef013d-9bb2-4b8f-9eea-e45563efc420', - u'enable_snat': True, - u'external_fixed_ips': - [{u'subnet_id': u'808c3b3f-3d79-4c5b-a5b6-95dd07abeb2d', - u'ip_address': u'192.168.21.3'} - ]}, - u'availability_zone_hints': [], - u'availability_zones': [], - u'description': u'', - u'admin_state_up': True, - u'tenant_id': 
u'f6f6d0b2591f41acb8257656d70029fc', - u'distributed': False, - u'routes': [], - u'ha': False, - u'id': u'7fc86d4b-4c0e-4ed8-8d39-e27b7c1b7ae8', - u'name': u'provider_route'} -]} - -FakeSecGroup = {'security_groups': [ - {u'tenant_id': u'23b119d06168447c8dbb4483d9567bd8', - u'name': u'default', - u'id': u'97910ed4-1dcb-4704-8814-3ddca818ac16', - u'description': u'Default security group', - u'security_group_rules': [ - {u'remote_group_id': u'ac4a6134-0176-44db-abab-559d284c4cdc', - u'direction': u'ingress', - u'protocol': None, - u'description': u'', - u'ethertype': u'IPv4', - u'remote_ip_prefix': None, - u'port_range_max': None, - u'security_group_id': u'ac4a6134-0176-44db-abab-559d284c4cdc', - u'port_range_min': None, - u'tenant_id': u'23b119d06168447c8dbb4483d9567bd8', - u'id': u'21416a24-6a7a-4830-bbec-1426b21e085a'}, - - {u'remote_group_id': u'ac4a6134-0176-44db-abab-559d284c4cdc', - u'direction': u'ingress', - u'protocol': None, - u'description': u'', - u'ethertype': u'IPv6', - u'remote_ip_prefix': None, - u'port_range_max': None, - u'security_group_id': u'ac4a6134-0176-44db-abab-559d284c4cdc', - u'port_range_min': None, - u'tenant_id': u'23b119d06168447c8dbb4483d9567bd8', - u'id': u'47f67d6a-4e73-465a-9f4d-d9b850f85f22'}, - - {u'remote_group_id': None, - u'direction': u'egress', - u'protocol': None, - u'description': u'', - u'ethertype': u'IPv6', - u'remote_ip_prefix': None, - u'port_range_max': None, - u'security_group_id': u'ac4a6134-0176-44db-abab-559d284c4cdc', - u'port_range_min': None, - u'tenant_id': u'23b119d06168447c8dbb4483d9567bd8', - u'id': u'c24e7148-820c-4147-9032-6fcdb96db6f7'}]}, -]} - - -def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs): - def noop(*args, **kwargs): - pass - - hooks = ( - 'on_prepare_begin', - 'on_prepare_finish', - 'on_main', - 'on_complete', - ) - for hook_name in hooks: - hook = getattr(operation, hook_name, noop) - hook(checkpoint, resource, context, parameters, **kwargs) - - -class FakeNeutronClient(object): - def list_networks(self): - return FakeNetworks - - def list_subnets(self): - return FakeSubnets - - def list_ports(self): - return FakePorts - - def list_routers(self): - return FakeRoutes - - def list_security_groups(self): - return FakeSecGroup - - -class FakeBankPlugin(BankPlugin): - def __init__(self): - self._objects = {} - - def create_object(self, key, value): - self._objects[key] = value - - def update_object(self, key, value, context=None): - self._objects[key] = value - - def get_object(self, key, context=None): - value = self._objects.get(key, None) - if value is None: - raise Exception - return value - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - objects_name = [] - if prefix is not None: - for key, value in self._objects.items(): - if key.find(prefix) == 0: - objects_name.append(key.lstrip(prefix)) - else: - objects_name = self._objects.keys() - return objects_name - - def delete_object(self, key, context=None): - self._objects.pop(key) - - def get_owner_id(self, context=None): - return - - -fake_checkpointid = "checkpoint_id" -fake_project_id = "abcd" -fake_bank = Bank(FakeBankPlugin()) -fake_bank_section = BankSection(bank=fake_bank, section="fake") - -ResourceNode = collections.namedtuple( - "ResourceNode", - ["value"] -) - - -class FakeCheckpoint(object): - def __init__(self): - self.bank_section = fake_bank_section - - def get_resource_bank_section(self, resource_id): - return self.bank_section - - -class 
NeutronProtectionPluginTest(base.TestCase): - def setUp(self): - super(NeutronProtectionPluginTest, self).setUp() - - self.plugin = NeutronProtectionPlugin(cfg.CONF) - - cfg.CONF.set_default('neutron_endpoint', - 'http://127.0.0.1:9696', - 'neutron_client') - - self.cntxt = RequestContext(user_id='admin', - project_id='abcd', - auth_token='efgh') - - self.neutron_client = client_factory.ClientFactory.create_client( - "neutron", self.cntxt) - self.checkpoint = FakeCheckpoint() - - def test_get_options_schema(self): - options_schema = self.plugin.get_options_schema( - 'OS::Neutron::Network') - self.assertEqual(options_schema, - network_plugin_schemas.OPTIONS_SCHEMA) - - def test_get_restore_schema(self): - options_schema = self.plugin.get_restore_schema( - 'OS::Neutron::Network') - self.assertEqual(options_schema, - network_plugin_schemas.RESTORE_SCHEMA) - - def test_get_saved_info_schema(self): - options_schema = self.plugin.get_saved_info_schema( - 'OS::Neutron::Network') - self.assertEqual(options_schema, - network_plugin_schemas.SAVED_INFO_SCHEMA) - - def test_get_supported_resources_types(self): - types = self.plugin.get_supported_resources_types() - self.assertEqual([constants.NETWORK_RESOURCE_TYPE], types) - - @mock.patch('karbor.services.protection.clients.neutron.create') - def test_create_backup(self, mock_neutron_create): - resource = Resource(id="network_id_1", - type=constants.NETWORK_RESOURCE_TYPE, - name="test") - - fake_bank_section.update_object = mock.MagicMock() - - protect_operation = self.plugin.get_protect_operation(resource) - mock_neutron_create.return_value = self.neutron_client - - self.neutron_client.list_networks = mock.MagicMock() - self.neutron_client.list_networks.return_value = FakeNetworks - - self.neutron_client.list_subnets = mock.MagicMock() - self.neutron_client.list_subnets.return_value = FakeSubnets - - self.neutron_client.list_ports = mock.MagicMock() - self.neutron_client.list_ports.return_value = FakePorts - - self.neutron_client.list_routers = mock.MagicMock() - self.neutron_client.list_routers.return_value = FakeRoutes - - self.neutron_client.list_security_groups = mock.MagicMock() - self.neutron_client.list_security_groups.return_value = FakeSecGroup - - call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.clients.neutron.create') - def test_delete_backup(self, mock_neutron_create): - resource = Resource(id="network_id_1", - type=constants.NETWORK_RESOURCE_TYPE, - name="test") - - fake_bank._plugin._objects[ - "/resource_data/checkpoint_id/network_id_1/metadata"] = { - "resource_id": "network_id_1", - "network_metadata": { - "id": "9b68fb64-39d4-4d41-8cc9-f27846c6e5f5", - "router:external": False, - "admin_state_up": True, - "mtu": 1500 - }, - "subnet_metadata": { - "id": "808c3b3f-3d79-4c5b-a5b6-95dd07abeb2d", - "network_id": "9b68fb64-39d4-4d41-8cc9-f27846c6e5f5", - "host_routes": [], - "dns_nameservers": [] - }, - "port_metadata": { - "id": "2b34c97a-4ccc-44c0-bc50-b7bbfc3508eb", - "admin_state_up": True, - "allowed_address_pairs": [], - "fixed_ips": [{"subnet_id": "3d79-4c5b-a5b6-95dd07abeb2d", - "ip_address": "192.168.21.3"}] - }, - "router_metadata": { - "id": "4c0e-4ed8-8d39-e27b7c1b7ae8", - "admin_state_up": True, - "availability_zone_hints": [], - "fixed_ips": {"network_id": "9bb2-4b8f-9eea-e45563efc420", - "enable_snat": True} - }, - "security-group_metadata": { - "id": "4ccc-44c0-bc50-b7bbfc3508eb", - "description": "Default security group", - "security_group_rules": [] - 
} - } - - delete_operation = self.plugin.get_delete_operation(resource) - mock_neutron_create.return_value = self.neutron_client - - call_hooks(delete_operation, self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.protection_plugins.utils.' - 'update_resource_verify_result') - def test_verify_backup(self, mock_update_verify): - resource = Resource(id="abcd", - type=constants.NETWORK_RESOURCE_TYPE, - name="test") - - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = 'available' - - verify_operation = self.plugin.get_verify_operation(resource) - call_hooks(verify_operation, self.checkpoint, resource, self.cntxt, - {}) - mock_update_verify.assert_called_with( - None, resource.type, resource.id, 'available') diff --git a/karbor/tests/unit/protection/test_nova_protection_plugin.py b/karbor/tests/unit/protection/test_nova_protection_plugin.py deleted file mode 100644 index 7b1a653c..00000000 --- a/karbor/tests/unit/protection/test_nova_protection_plugin.py +++ /dev/null @@ -1,532 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from collections import namedtuple -from unittest import mock - -from oslo_config import cfg - -from karbor.common import constants -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection.protection_plugins.server \ - import server_plugin_schemas -from karbor.services.protection.protection_plugins.server. 
\ - nova_protection_plugin import NovaProtectionPlugin -from karbor.tests import base - - -class Server(object): - def __init__(self, id, addresses, availability_zone, - flavor, key_name, security_groups, status): - super(Server, self).__init__() - self.id = id - self.addresses = addresses - self.__setattr__("OS-EXT-AZ:availability_zone", availability_zone) - self.flavor = flavor - self.key_name = key_name - self.security_groups = security_groups - self.status = status - - -class Volume(object): - def __init__(self, id, volume_type, status, bootable, - attachments, name=None): - super(Volume, self).__init__() - self.id = id - self.volume_type = volume_type - self.status = status - self.bootable = bootable - self.attachments = attachments - self.name = name - - -class Image(object): - def __init__(self, id, status, disk_format, container_format): - super(Image, self).__init__() - self.id = id - self.status = status - self.disk_format = disk_format - self.container_format = container_format - - -FakePorts = {'ports': [ - {'fixed_ips': [{'subnet_id': 'subnet-1', - 'ip_address': '10.0.0.21'}], - 'id': 'port-1', - 'mac_address': 'mac_address_1', - 'device_id': 'vm_id_1', - 'name': '', - 'admin_state_up': True, - 'network_id': 'network_id_1'}, - {'fixed_ips': [{'subnet_id': 'subnet-1', - 'ip_address': '10.0.0.22'}], - 'id': 'port-2', - 'mac_address': 'mac_address_2', - 'device_id': 'vm_id_2', - 'name': '', - 'admin_state_up': True, - 'network_id': 'network_id_2'} -]} - -FakeServers = { - "vm_id_1": Server(id="vm_id_1", - addresses={'fake_net': [ - {'OS-EXT-IPS-MAC:mac_addr': 'mac_address_1', - 'OS-EXT-IPS:type': 'fixed', - 'addr': '10.0.0.21', - 'version': 4} - ]}, - availability_zone="nova", - flavor={'id': 'flavor_id', - 'links': [ - {'href': '', - 'rel': 'bookmark'} - ]}, - key_name=None, - security_groups="default", - status="ACTIVE"), - "vm_id_2": Server(id="vm_id_2", - addresses={'fake_net': [ - {'OS-EXT-IPS-MAC:mac_addr': 'mac_address_2', - 'OS-EXT-IPS:type': 'fixed', - 'addr': '10.0.0.22', - 'version': 4} - ]}, - availability_zone="nova", - flavor={'id': 'flavor_id', - 'links': [ - {'href': '', - 'rel': 'bookmark'} - ]}, - key_name=None, - security_groups="default", - status="ACTIVE") -} - -FakeVolumes = { - "vol_id_1": Volume(id="vol_id_1", - volume_type="", - status="in-use", - bootable="", - attachments=[{'server_id': 'vm_id_2', - 'attachment_id': '', - 'host_name': '', - 'volume_id': 'vol_id_1', - 'device': '/dev/vdb', - 'id': 'attach_id_1'}], - name="vol_id_1_name") -} - -FakeImages = { - "image_id_1": Image(id="image_id_1", - disk_format="", - container_format="", - status="active") -} - -FakeGraphNode = namedtuple("GraphNode", ( - "value", - "child_nodes", -)) - - -class FakeNovaClient(object): - class Servers(object): - def get(self, server_id): - return FakeServers[server_id] - - def create_image(self, server_id, name, **kwargs): - FakeImages["image_id_2"] = Image(id="image_id_2", - disk_format="", - container_format="", - status="active") - return "image_id_2" - - def __getattr__(self, item): - return None - - def __init__(self): - super(FakeNovaClient, self).__init__() - self.servers = self.Servers() - - -class FakeGlanceClient(object): - class Images(object): - def get(self, image_id): - return FakeImages[image_id] - - def data(self, image_id): - return "image_data_" + image_id - - def delete(self, image_id): - if image_id in FakeImages: - FakeImages.pop(image_id) - - def __getattr__(self, item): - return None - - def __init__(self): - super(FakeGlanceClient, self).__init__() 
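FakeNovaClient and FakeGlanceClient in this module both define __getattr__ to return None, so any attribute the plugin touches but the test never stubs resolves harmlessly instead of raising AttributeError. A standalone sketch of that pattern (the class names here are hypothetical):

    class FakeService(object):
        class Api(object):
            def get(self, resource_id):
                return {'id': resource_id}

            def __getattr__(self, item):
                # Only called for attributes not otherwise defined.
                return None

        def __init__(self):
            self.api = self.Api()

    svc = FakeService()
    assert svc.api.get('x') == {'id': 'x'}   # defined method still works
    assert svc.api.delete is None            # unstubbed lookup stays silent

The trade-off is that a mistyped attribute silently yields None rather than failing fast, which is why fakes like these are paired with assertions on observable results.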
- self.images = self.Images() - - -class FakeCinderClient(object): - class Volumes(object): - def get(self, volume_id): - return FakeVolumes[volume_id] - - def list(self, detailed=True, search_opts=None, limit=None): - return [FakeVolumes['vol_id_1'], ] - - def __getattr__(self, item): - return None - - def __init__(self): - super(FakeCinderClient, self).__init__() - self.volumes = self.Volumes() - - -class FakeNeutronClient(object): - def list_ports(self, mac_address): - result_ports = [] - for port in FakePorts["ports"]: - if port["mac_address"] == mac_address: - result_ports.append(port) - return {"ports": result_ports} - - -class FakeBankPlugin(BankPlugin): - def __init__(self, config=None): - super(FakeBankPlugin, self).__init__(config) - self._objects = {} - - def update_object(self, key, value, context=None): - self._objects[key] = value - - def get_object(self, key, context=None): - value = self._objects.get(key, None) - if value is None: - raise Exception - return value - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - objects_name = [] - if prefix is not None: - for key, value in self._objects.items(): - if key.find(prefix) == 0: - objects_name.append(key) - else: - objects_name = self._objects.keys() - return objects_name - - def delete_object(self, key, context=None): - self._objects.pop(key) - - def get_owner_id(self, context=None): - return - - -fake_bank = Bank(FakeBankPlugin()) - -ResourceNode = namedtuple( - "ResourceNode", - ["value", - "child_nodes"] -) - - -class Checkpoint(object): - def __init__(self): - super(Checkpoint, self).__init__() - self.id = "checkpoint_id" - self.graph = [] - - @property - def resource_graph(self): - return self.graph - - @resource_graph.setter - def resource_graph(self, resource_graph): - self.graph = resource_graph - - def get_resource_bank_section(self, resource_id): - return BankSection( - bank=fake_bank, - section="/resource_data/%s/%s" % (self.id, resource_id) - ) - - -def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs): - def noop(*args, **kwargs): - pass - - hooks = ( - 'on_prepare_begin', - 'on_prepare_finish', - 'on_main', - 'on_complete', - ) - for hook_name in hooks: - hook = getattr(operation, hook_name, noop) - hook(checkpoint, resource, context, parameters, **kwargs) - - -class NovaProtectionPluginTest(base.TestCase): - def setUp(self): - super(NovaProtectionPluginTest, self).setUp() - self.cntxt = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh') - self.plugin = NovaProtectionPlugin(cfg.CONF) - self.glance_client = FakeGlanceClient() - self.nova_client = FakeNovaClient() - self.cinder_client = FakeCinderClient() - self.neutron_client = FakeNeutronClient() - self.checkpoint = Checkpoint() - - def test_get_options_schema(self): - options_schema = self.plugin.get_options_schema( - constants.SERVER_RESOURCE_TYPE) - self.assertEqual(options_schema, server_plugin_schemas.OPTIONS_SCHEMA) - - def test_get_restore_schema(self): - options_schema = self.plugin.get_restore_schema( - constants.SERVER_RESOURCE_TYPE) - self.assertEqual(options_schema, server_plugin_schemas.RESTORE_SCHEMA) - - def test_get_saved_info_schema(self): - options_schema = self.plugin.get_saved_info_schema( - constants.SERVER_RESOURCE_TYPE) - self.assertEqual(options_schema, - server_plugin_schemas.SAVED_INFO_SCHEMA) - - @mock.patch('karbor.services.protection.clients.neutron.create') - @mock.patch('karbor.services.protection.clients.glance.create') - 
@mock.patch('karbor.services.protection.clients.nova.create') - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_create_backup_without_volumes(self, mock_cinder_client, - mock_nova_client, - mock_glance_client, - mock_neutron_client): - resource = Resource(id="vm_id_1", - type=constants.SERVER_RESOURCE_TYPE, - name="fake_vm") - - protect_operation = self.plugin.get_protect_operation(resource) - mock_cinder_client.return_value = self.cinder_client - mock_nova_client.return_value = self.nova_client - mock_glance_client.return_value = self.glance_client - mock_neutron_client.return_value = self.neutron_client - - call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, - {}) - - self.assertEqual( - constants.RESOURCE_STATUS_AVAILABLE, - fake_bank._plugin._objects[ - "/resource_data/checkpoint_id/vm_id_1/status"] - ) - resource_definition = { - "resource_id": "vm_id_1", - "attach_metadata": {}, - 'boot_metadata': {'boot_device_type': 'volume'}, - "server_metadata": { - "availability_zone": "nova", - "networks": ["network_id_1"], - "floating_ips": [], - "flavor": "flavor_id", - "key_name": None, - "security_groups": "default", - }, - } - self.assertEqual( - resource_definition, - fake_bank._plugin._objects[ - "/resource_data/checkpoint_id/vm_id_1/metadata"] - ) - - @mock.patch('karbor.services.protection.clients.neutron.create') - @mock.patch('karbor.services.protection.clients.glance.create') - @mock.patch('karbor.services.protection.clients.nova.create') - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_create_backup_with_volumes(self, mock_cinder_client, - mock_nova_client, - mock_glance_client, - mock_neutron_client): - vm_resource = Resource(id="vm_id_2", - type=constants.SERVER_RESOURCE_TYPE, - name="fake_vm") - - protect_operation = self.plugin.get_protect_operation(vm_resource) - mock_cinder_client.return_value = self.cinder_client - mock_nova_client.return_value = self.nova_client - mock_glance_client.return_value = self.glance_client - mock_neutron_client.return_value = self.neutron_client - checkpoint = Checkpoint() - checkpoint.resource_graph = [FakeGraphNode(value=Resource( - type='OS::Nova::Server', id='vm_id_2', name='None'), - child_nodes=[FakeGraphNode(value=Resource( - type='OS::Cinder::Volume', id='vol_id_1', name=None), - child_nodes=())])] - - call_hooks(protect_operation, checkpoint, vm_resource, self.cntxt, - {}) - - self.assertEqual( - constants.RESOURCE_STATUS_AVAILABLE, - fake_bank._plugin._objects[ - "/resource_data/checkpoint_id/vm_id_2/status"] - ) - resource_definition = { - "resource_id": "vm_id_2", - 'boot_metadata': {'boot_device_type': 'volume'}, - "server_metadata": { - "availability_zone": "nova", - "networks": ["network_id_2"], - "floating_ips": [], - "flavor": "flavor_id", - "key_name": None, - "security_groups": "default", - }, - 'attach_metadata': { - 'vol_id_1': {'attachment_id': '', - 'bootable': '', - 'device': '/dev/vdb', - 'host_name': '', - 'id': 'attach_id_1', - 'server_id': 'vm_id_2', - 'volume_id': 'vol_id_1'} - }, - } - self.assertEqual( - fake_bank._plugin._objects[ - "/resource_data/checkpoint_id/vm_id_2/metadata"], - resource_definition - ) - - @mock.patch('karbor.services.protection.clients.glance.create') - def test_delete_backup(self, mock_glance_client): - resource = Resource(id="vm_id_1", - type=constants.SERVER_RESOURCE_TYPE, - name="fake_vm") - - fake_bank._plugin._objects[ - "/resource_data/checkpoint_id/vm_id_1/metadata"] = { - "resource_id": "vm_id_1", - 
"attach_metadata": {}, - "server_metadata": { - "availability_zone": "nova", - "networks": ["network_id_1"], - "floating_ips": [], - "flavor": "flavor_id", - "key_name": None, - "security_groups": "default", - }, - "snapshot_id": "image_id_2", - "snapshot_metadata": { - "disk_format": "", - "container_format": "", - "name": "snapshot_checkpoint_id@vm_id_1" - } - } - - delete_operation = self.plugin.get_delete_operation(resource) - mock_glance_client.return_value = self.glance_client - - fake_bank._plugin._objects[ - "/resource_data/checkpoint_id/vm_id_1/data_0" - ] = "image_data_1" - fake_bank._plugin._objects[ - "/resource_data/checkpoint_id/vm_id_1/data_1" - ] = "image_data_1" - - call_hooks(delete_operation, self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.protection_plugins.utils.' - 'update_resource_restore_result') - @mock.patch('karbor.services.protection.clients.neutron.create') - @mock.patch('karbor.services.protection.clients.glance.create') - @mock.patch('karbor.services.protection.clients.nova.create') - @mock.patch('karbor.services.protection.clients.cinder.create') - def test_restore_backup_with_parameters(self, mock_cinder_client, - mock_nova_client, - mock_glance_client, - mock_neutron_client, - mock_update_result): - resource = Resource(id='vm_id_1', - type=constants.SERVER_RESOURCE_TYPE, - name='fake_vm') - fake_bank._plugin._objects[ - "/resource_data/checkpoint_id/vm_id_1/metadata"] = { - "server_metadata": { - "availability_zone": "nova", - "key_name": None, - "floating_ips": [], - "flavor": "fake_flavor_id_1", - "networks": ["fake_net_id_1"], - "security_groups": [{"name": "default"}]}, - "boot_metadata": { - "boot_image_id": "fake_image_id", - "boot_device_type": "image"}, - "attach_metadata": {}, - "resource_id": "vm_id_1"} - restore_operation = self.plugin.get_restore_operation(resource) - mock_cinder_client.return_value = self.cinder_client - mock_nova_client.return_value = self.nova_client - mock_glance_client.return_value = self.glance_client - mock_neutron_client.return_value = self.neutron_client - parameters = {'restore_net_id': 'fake_net_id_2', - 'restore_flavor_id': 'fake_flavor_id_2'} - checkpoint = Checkpoint() - new_resources = {"new_resources": {"fake_image_id": "fake_image_id"}} - self.nova_client.servers.create = mock.MagicMock() - self.nova_client.servers.create.return_value = FakeServers['vm_id_2'] - call_hooks(restore_operation, checkpoint, resource, self.cntxt, - parameters, **new_resources) - properties = { - "availability_zone": "nova", - "flavor": "fake_flavor_id_2", - "name": "karbor-restore-server", - "image": "fake_image_id", - "key_name": None, - "security_groups": ['default'], - "nics": [{'net-id': 'fake_net_id_2'}], - "userdata": None - } - self.nova_client.servers.create.assert_called_with(**properties) - - @mock.patch('karbor.services.protection.protection_plugins.utils.' 
- 'update_resource_verify_result') - def test_verify_backup(self, mock_update_verify): - resource = Resource(id="123", - type=constants.SERVER_RESOURCE_TYPE, - name='fake') - - fake_bank._plugin._objects[ - "/resource_data/checkpoint_id/123/status" - ] = "available" - - verify_operation = self.plugin.get_verify_operation(resource) - call_hooks(verify_operation, self.checkpoint, resource, self.cntxt, - {}) - mock_update_verify.assert_called_with( - None, resource.type, resource.id, 'available') - - def test_get_supported_resources_types(self): - types = self.plugin.get_supported_resources_types() - self.assertEqual([constants.SERVER_RESOURCE_TYPE], types) diff --git a/karbor/tests/unit/protection/test_pod_protection_plugin.py b/karbor/tests/unit/protection/test_pod_protection_plugin.py deleted file mode 100644 index aa45d9e5..00000000 --- a/karbor/tests/unit/protection/test_pod_protection_plugin.py +++ /dev/null @@ -1,194 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from kubernetes.client.models.v1_object_meta import V1ObjectMeta -from kubernetes.client.models.v1_pod import V1Pod -from kubernetes.client.models.v1_pod_spec import V1PodSpec -from kubernetes.client.models.v1_pod_status import V1PodStatus -from oslo_config import cfg -from oslo_config import fixture - -from karbor.common import constants -from karbor.context import RequestContext -from karbor.resource import Resource -from karbor.services.protection.bank_plugin import Bank -from karbor.services.protection.bank_plugin import BankPlugin -from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection import client_factory -from karbor.services.protection.clients import k8s -from karbor.services.protection.protection_plugins. 
\ - pod.pod_protection_plugin import PodProtectionPlugin -from karbor.services.protection.protection_plugins.pod \ - import pod_plugin_schemas -from karbor.tests import base - - -class FakeBankPlugin(BankPlugin): - def update_object(self, key, value, context=None): - return - - def get_object(self, key, context=None): - return - - def list_objects(self, prefix=None, limit=None, marker=None, - sort_dir=None, context=None): - return - - def delete_object(self, key, context=None): - return - - def get_owner_id(self, context=None): - return - - -fake_bank = Bank(FakeBankPlugin()) -fake_bank_section = BankSection(bank=fake_bank, section="fake") - - -def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs): - def noop(*args, **kwargs): - pass - - hooks = ( - 'on_prepare_begin', - 'on_prepare_finish', - 'on_main', - 'on_complete', - ) - for hook_name in hooks: - hook = getattr(operation, hook_name, noop) - hook(checkpoint, resource, context, parameters, **kwargs) - - -class Checkpoint(object): - def __init__(self): - self.bank_section = fake_bank_section - - def get_resource_bank_section(self, resource_id): - return self.bank_section - - -class PodProtectionPluginTest(base.TestCase): - def setUp(self): - super(PodProtectionPluginTest, self).setUp() - - plugin_config = cfg.ConfigOpts() - plugin_config_fixture = self.useFixture(fixture.Config(plugin_config)) - plugin_config_fixture.load_raw_values( - group='poll_interval', - poll_interval=0, - ) - self.plugin = PodProtectionPlugin(plugin_config) - - k8s.register_opts(cfg.CONF) - cfg.CONF.set_default('k8s_host', - 'https://192.168.98.35:6443', - 'k8s_client') - cfg.CONF.set_default('k8s_ssl_ca_cert', - '/etc/provider.d/server-ca.crt', - 'k8s_client') - cfg.CONF.set_default('k8s_cert_file', - '/etc/provider.d/client-admin.crt', - 'k8s_client') - cfg.CONF.set_default('k8s_key_file', - '/etc/provider.d/client-admin.key', - 'k8s_client') - - self.cntxt = RequestContext(user_id='demo', - project_id='abcd', - auth_token='efgh', - service_catalog=None) - self.k8s_client = None - self.checkpoint = Checkpoint() - - def test_get_options_schema(self): - options_schema = self.plugin.get_options_schema( - constants.POD_RESOURCE_TYPE) - self.assertEqual(options_schema, - pod_plugin_schemas.OPTIONS_SCHEMA) - - def test_get_restore_schema(self): - options_schema = self.plugin.get_restore_schema( - constants.POD_RESOURCE_TYPE) - self.assertEqual(options_schema, - pod_plugin_schemas.RESTORE_SCHEMA) - - def test_get_saved_info_schema(self): - options_schema = self.plugin.get_saved_info_schema( - constants.POD_RESOURCE_TYPE) - self.assertEqual(options_schema, - pod_plugin_schemas.SAVED_INFO_SCHEMA) - - @mock.patch('karbor.services.protection.clients.k8s.create') - def test_create_backup(self, mock_k8s_create): - self.k8s_client = client_factory.ClientFactory.create_client( - "k8s", self.cntxt) - resource = Resource(id="c88b92a8-e8b4-504c-bad4-343d92061871", - type=constants.POD_RESOURCE_TYPE, - name='default:busybox-test') - - fake_bank_section.update_object = mock.MagicMock() - - protect_operation = self.plugin.get_protect_operation(resource) - mock_k8s_create.return_value = self.k8s_client - - self.k8s_client.read_namespaced_pod = mock.MagicMock() - self.k8s_client.read_namespaced_pod.return_value = V1Pod( - api_version="v1", - kind="Pod", - metadata=V1ObjectMeta( - name="busybox-test", - namespace="default", - uid="dd8236e1-8c6c-11e7-9b7a-fa163e18e097"), - spec=V1PodSpec(volumes=[], containers=[]), - status=V1PodStatus(phase="Running")) - 
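call_hooks, redefined near the top of each of these modules and invoked just below, walks an operation through the four-phase protection lifecycle, substituting a no-op for any hook the operation does not implement. A self-contained restatement with a toy operation (ToyOperation is hypothetical; the hook names come from the tests themselves):

    def call_hooks(operation, checkpoint, resource, context, parameters,
                   **kwargs):
        def noop(*args, **kwargs):
            pass

        for hook_name in ('on_prepare_begin', 'on_prepare_finish',
                          'on_main', 'on_complete'):
            # Fall back to a no-op so partially implemented operations work.
            getattr(operation, hook_name, noop)(
                checkpoint, resource, context, parameters, **kwargs)

    class ToyOperation(object):
        def __init__(self):
            self.calls = []

        def on_main(self, checkpoint, resource, context, parameters, **kwargs):
            self.calls.append('main')

    op = ToyOperation()
    call_hooks(op, checkpoint=None, resource=None, context=None, parameters={})
    assert op.calls == ['main']  # only the hook the operation defines runs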
fake_bank_section.update_object = mock.MagicMock() - call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, - {}) - - @mock.patch('karbor.services.protection.protection_plugins.utils.' - 'update_resource_verify_result') - def test_verify_backup(self, mock_update_verify): - resource = Resource(id="c88b92a8-e8b4-504c-bad4-343d92061871", - type=constants.POD_RESOURCE_TYPE, - name='default:busybox-test') - - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = 'available' - - verify_operation = self.plugin.get_verify_operation(resource) - call_hooks(verify_operation, self.checkpoint, resource, self.cntxt, - {}) - mock_update_verify.assert_called_with( - None, resource.type, resource.id, 'available') - - def test_delete_backup(self): - resource = Resource(id="c88b92a8-e8b4-504c-bad4-343d92061871", - type=constants.POD_RESOURCE_TYPE, - name='default:busybox-test') - - fake_bank_section.get_object = mock.MagicMock() - fake_bank_section.get_object.return_value = { - "pod_id": "1234"} - fake_bank_section.list_objects = mock.MagicMock() - fake_bank_section.list_objects.return_value = [] - - delete_operation = self.plugin.get_delete_operation(resource) - call_hooks(delete_operation, self.checkpoint, resource, self.cntxt, - {}) - - def test_get_supported_resources_types(self): - types = self.plugin.get_supported_resources_types() - self.assertEqual(types, - [constants.POD_RESOURCE_TYPE]) diff --git a/karbor/tests/unit/protection/test_protectable.py b/karbor/tests/unit/protection/test_protectable.py deleted file mode 100644 index 53d610d7..00000000 --- a/karbor/tests/unit/protection/test_protectable.py +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
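The registry test below feeds small dependency dictionaries to ProtectableRegistry.build_graph and then walks the result with assert_graph. A minimal sketch of the traversal being verified, assuming a plain recursive expansion (GraphNode stands in for Karbor's graph module, and this version ignores the node de-duplication the real implementation performs when two parents share a child):

    from collections import namedtuple

    GraphNode = namedtuple('GraphNode', ['value', 'child_nodes'])

    def build_graph(roots, get_children):
        # Expand each root into a node whose children are built recursively
        # from the same dependency function.
        return [GraphNode(value=root,
                          child_nodes=build_graph(get_children(root),
                                                  get_children))
                for root in roots]

    deps = {'A': ['B'], 'B': ['C'], 'C': []}
    graph = build_graph(['A'], deps.__getitem__)
    assert graph[0].value == 'A'
    assert graph[0].child_nodes[0].value == 'B'
    assert graph[0].child_nodes[0].child_nodes[0].child_nodes == []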
- -from karbor.resource import Resource -from karbor.services.protection.protectable_plugin import ProtectablePlugin -from karbor.services.protection.protectable_registry import ProtectableRegistry - -from karbor.tests import base - -_FAKE_TYPE = "Karbor::Test::Fake" - - -class _FakeProtectablePlugin(ProtectablePlugin): - def __init__(self, cntx, conf=None): - super(_FakeProtectablePlugin, self).__init__(cntx) - self.graph = {} - - def instance(self, cntx, conf=None): - new = self.__class__(cntx) - new.graph = self.graph - return new - - def get_resource_type(self): - return _FAKE_TYPE - - def get_parent_resource_types(self): - return _FAKE_TYPE - - def list_resources(self, context): - return self.graph.values() - - def show_resource(self, context, resource_id): - return [Resource(type=_FAKE_TYPE, - id=resource.id, - name=resource.name) - for resource in self.graph - if resource.id == resource_id] - - def get_dependent_resources(self, context, parent_resource): - return self.graph[parent_resource] - - -class ProtectableRegistryTest(base.TestCase): - def setUp(self): - super(ProtectableRegistryTest, self).setUp() - self.protectable_registry = ProtectableRegistry() - self._fake_plugin = _FakeProtectablePlugin(None) - self.protectable_registry.register_plugin(self._fake_plugin) - - def test_graph_building(self): - A = Resource(_FAKE_TYPE, "A", 'nameA') - B = Resource(_FAKE_TYPE, "B", 'nameB') - C = Resource(_FAKE_TYPE, "C", 'nameC') - test_matrix = ( - ( - {A: [B], - B: [C], - C: []}, - (A, C) - ), - ( - {A: [C], - B: [C], - C: []}, - (A, C) - ), - ) - - for g, resources in test_matrix: - self._fake_plugin.graph = g - result_graph = self.protectable_registry.build_graph(None, - resources) - self.assert_graph(result_graph, g) - self.protectable_registry._protectable_map = {} - - def assert_graph(self, g, g_dict): - for item in g: - expected = set(g_dict[item.value]) - found = set(child.value for child in item.child_nodes) - self.assertEqual(expected, found) - self.assert_graph(item.child_nodes, g_dict) diff --git a/karbor/tests/unit/protection/test_provider.py b/karbor/tests/unit/protection/test_provider.py deleted file mode 100755 index 2d449703..00000000 --- a/karbor/tests/unit/protection/test_provider.py +++ /dev/null @@ -1,119 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
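The ProviderRegistry tests below exercise list_providers with marker, limit, and filters arguments. A sketch of that pagination contract, assuming providers live in an ordered mapping (the helper below is illustrative, not Karbor's implementation):

    from collections import namedtuple

    Provider = namedtuple('Provider', ['id', 'name', 'description'])

    def list_providers(providers, marker=None, limit=None, filters=None):
        items = [{'id': p.id, 'name': p.name, 'description': p.description}
                 for p in providers.values()]
        if filters:
            items = [item for item in items
                     if all(item.get(k) == v for k, v in filters.items())]
        if marker is not None:
            ids = [item['id'] for item in items]
            items = items[ids.index(marker) + 1:]  # entries after the marker
        if limit is not None:
            items = items[:limit]
        return items

    providers = {
        'fake_provider_id_1': Provider('fake_provider_id_1',
                                       'fake_provider_name_1', ''),
        'fake_provider_id_2': Provider('fake_provider_id_2',
                                       'fake_provider_name_2', ''),
    }
    assert len(list_providers(providers, marker='fake_provider_id_1')) == 1
    assert len(list_providers(providers, limit=1)) == 1
    assert len(list_providers(
        providers, filters={'name': 'fake_provider_name_1'})) == 1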
- -from unittest import mock - -from oslo_config import cfg - -from karbor.resource import Resource -from karbor.services.protection import provider -from karbor.tests import base -from karbor.tests.unit.protection import fakes - -CONF = cfg.CONF - -( - parent_type, - child_type, - grandchild_type, -) = fakes.FakeProtectionPlugin.SUPPORTED_RESOURCES - -parent = Resource(id='A1', name='parent', type=parent_type) -child = Resource(id='B1', name='child', type=child_type) -grandchild = Resource(id='C1', name='grandchild', type=grandchild_type) -resource_graph = { - parent: [child], - child: [grandchild], - grandchild: [], -} - - -def set_provider_list(provider_registry): - provider_registry.providers = { - 'fake_provider_id_1': FakeProvider( - id='fake_provider_id_1', - name='fake_provider_name_1', - description='Fake provider 1 description', - extended_info_schema='' - ), - 'fake_provider_id_2': FakeProvider( - id='fake_provider_id_2', - name='fake_provider_name_2', - description='Fake provider 2 description', - extended_info_schema='' - ) - } - - -class FakeProvider(object): - def __init__(self, id, name, description, extended_info_schema): - self.id = id - self.name = name - self.description = description - self.extended_info_schema = extended_info_schema - - -class ProviderRegistryTest(base.TestCase): - def setUp(self): - super(ProviderRegistryTest, self).setUp() - - @mock.patch.object(provider.PluggableProtectionProvider, '_load_bank') - @mock.patch.object(provider.PluggableProtectionProvider, - '_register_plugin') - def test_load_providers(self, mock_register_plugin, mock_load_bank): - pr = provider.ProviderRegistry() - self.assertEqual(1, mock_register_plugin.call_count) - self.assertEqual(1, mock_load_bank.call_count) - self.assertEqual(1, len(pr.providers)) - - self.assertEqual('fake_provider1', pr.providers['fake_id1'].name) - self.assertNotIn('fake_provider2', pr.providers) - - def test_provider_bank_config(self): - pr = provider.ProviderRegistry() - provider1 = pr.show_provider('fake_id1') - self.assertEqual('thor', provider1.bank._plugin.fake_host) - - def test_provider_plugin_config(self): - pr = provider.ProviderRegistry() - provider1 = pr.show_provider('fake_id1') - plugins = provider1.load_plugins() - self.assertEqual('user', plugins['Test::ResourceA'].fake_user) - - def test_list_provider(self): - pr = provider.ProviderRegistry() - set_provider_list(pr) - self.assertEqual(2, len(pr.list_providers())) - - def test_list_provider_with_marker(self): - pr = provider.ProviderRegistry() - set_provider_list(pr) - self.assertEqual( - 1, len(pr.list_providers(marker='fake_provider_id_1'))) - - def test_list_provider_with_limit(self): - pr = provider.ProviderRegistry() - set_provider_list(pr) - self.assertEqual( - 1, len(pr.list_providers(limit=1))) - - def test_list_provider_with_filters(self): - pr = provider.ProviderRegistry() - set_provider_list(pr) - filters = {'name': 'fake_provider_name_1'} - self.assertEqual(1, len(pr.list_providers(filters=filters))) - - def test_show_provider(self): - pr = provider.ProviderRegistry() - provider_list = pr.list_providers() - for provider_node in provider_list: - self.assertTrue(pr.show_provider(provider_node['id'])) diff --git a/karbor/tests/unit/protection/test_resource_flow.py b/karbor/tests/unit/protection/test_resource_flow.py deleted file mode 100644 index 55ea1e5f..00000000 --- a/karbor/tests/unit/protection/test_resource_flow.py +++ /dev/null @@ -1,225 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# 
not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from functools import partial -from unittest import mock - -from oslo_config import cfg - -from karbor.common import constants -from karbor.resource import Resource -from karbor.services.protection.flows.workflow import TaskFlowEngine -from karbor.services.protection import graph -from karbor.services.protection import resource_flow -from karbor.tests import base -from karbor.tests.unit.protection import fakes - -CONF = cfg.CONF - -( - parent_type, - child_type, - grandchild_type, -) = fakes.FakeProtectionPlugin.SUPPORTED_RESOURCES - -parent = Resource(id='A1', name='parent', type=parent_type) -child = Resource(id='B1', name='child', type=child_type) -grandchild = Resource(id='C1', name='grandchild', type=grandchild_type) - - -class ResourceFlowTest(base.TestCase): - def setUp(self): - super(ResourceFlowTest, self).setUp() - - self.resource_graph = { - parent: [child], - child: [grandchild], - grandchild: [], - } - - self.provider = fakes.FakeProvider() - self.test_graph = graph.build_graph([parent], - self.resource_graph.__getitem__) - self.taskflow_engine = TaskFlowEngine() - - def _walk_operation(self, protection, operation_type, - checkpoint='checkpoint', parameters={}, context=None, - **kwargs): - plugin_map = { - parent_type: protection, - child_type: protection, - grandchild_type: protection, - } - flow = resource_flow.build_resource_flow(operation_type, - context, - self.taskflow_engine, - plugin_map, - self.test_graph, - parameters) - - store = { - 'checkpoint': checkpoint, - 'operation_log': None - } - store.update(kwargs) - - engine = self.taskflow_engine.get_engine(flow, - engine='parallel', - store=store) - self.taskflow_engine.run_engine(engine) - - @mock.patch('karbor.tests.unit.protection.fakes.FakeProtectionPlugin') - def test_resource_no_impl(self, mock_protection): - for operation in constants.OPERATION_TYPES: - kwargs = {} - if operation == constants.OPERATION_RESTORE: - kwargs['new_resources'] = {} - kwargs['restore'] = None - elif operation == constants.OPERATION_VERIFY: - kwargs['new_resources'] = {} - kwargs['verify'] = None - elif operation == constants.OPERATION_COPY: - kwargs['checkpoint_copy'] = None - self._walk_operation(mock_protection, operation, **kwargs) - - @mock.patch('karbor.tests.unit.protection.fakes.FakeProtectionPlugin') - def test_resource_flow_callbacks(self, mock_protection): - for operation in constants.OPERATION_TYPES: - mock_operation = fakes.MockOperation() - get_operation_attr = 'get_{}_operation'.format(operation) - getattr( - mock_protection, - get_operation_attr - ).return_value = mock_operation - - kwargs = {} - if operation == constants.OPERATION_RESTORE: - kwargs['new_resources'] = {} - kwargs['restore'] = None - elif operation == constants.OPERATION_VERIFY: - kwargs['new_resources'] = {} - kwargs['verify'] = None - elif operation == constants.OPERATION_COPY: - kwargs['checkpoint_copy'] = None - self._walk_operation(mock_protection, operation, **kwargs) - - self.assertEqual(mock_operation.on_prepare_begin.call_count, - len(self.resource_graph)) - 
self.assertEqual(mock_operation.on_prepare_finish.call_count, - len(self.resource_graph)) - self.assertEqual(mock_operation.on_main.call_count, - len(self.resource_graph)) - self.assertEqual(mock_operation.on_complete.call_count, - len(self.resource_graph)) - - @mock.patch('karbor.tests.unit.protection.fakes.FakeProtectionPlugin') - def test_resource_flow_parameters(self, mock_protection): - resource_a1_id = "{}#{}".format(parent_type, 'A1') - resource_b1_id = "{}#{}".format(child_type, 'B1') - parameters = { - resource_a1_id: {'option1': 'value1'}, - resource_b1_id: {'option2': 'value2', 'option3': 'value3'}, - parent_type: {'option4': 'value4'}, - child_type: {'option3': 'value5'} - } - - def _compare_parameters(resource, func, expect_parameters): - result = fake_operation.all_invokes[resource][func] - for k, v in expect_parameters.items(): - self.assertEqual(v, result[k]) - - for operation in constants.OPERATION_TYPES: - if operation == constants.OPERATION_COPY: - continue - fake_operation = fakes.FakeOperation() - get_operation_attr = 'get_{}_operation'.format(operation) - getattr( - mock_protection, - get_operation_attr - ).return_value = fake_operation - - args = { - 'checkpoint': 'A', - 'context': 'B', - } - - kwargs = args.copy() - kwargs['operation_log'] = None - if operation == constants.OPERATION_RESTORE: - kwargs['new_resources'] = {} - kwargs['restore'] = None - elif operation == constants.OPERATION_VERIFY: - kwargs['new_resources'] = {} - kwargs['verify'] = None - elif operation == constants.OPERATION_COPY: - kwargs['checkpoint_copy'] = None - - self._walk_operation(mock_protection, operation, - parameters=parameters, **kwargs) - - for resource in self.resource_graph: - resource_params = parameters.get(resource.type, {}) - resource_id = '{}#{}'.format(resource.type, resource.id) - resource_params.update(parameters.get(resource_id, {})) - args['resource'] = resource - args['parameters'] = resource_params - _compare_parameters(resource, 'on_prepare_begin', args) - _compare_parameters(resource, 'on_prepare_finish', args) - _compare_parameters(resource, 'on_main', args) - _compare_parameters(resource, 'on_complete', args) - - @mock.patch('karbor.tests.unit.protection.fakes.FakeProtectionPlugin') - def test_resource_flow_order(self, mock_protection): - def test_order(order_list, hook_type, resource, *args, **kwargs): - order_list.append((hook_type, resource.id)) - - operation = constants.OPERATION_PROTECT - mock_operation = fakes.MockOperation() - get_operation_attr = 'get_{}_operation'.format(operation) - getattr( - mock_protection, - get_operation_attr - ).return_value = mock_operation - - order_list = [] - mock_operation.on_prepare_begin = partial(test_order, order_list, - 'pre_begin') - mock_operation.on_prepare_finish = partial(test_order, order_list, - 'pre_finish') - mock_operation.on_main = partial(test_order, order_list, 'main') - mock_operation.on_complete = partial(test_order, order_list, - 'complete') - - self._walk_operation(mock_protection, operation) - - self.assertLess(order_list.index(('pre_begin', parent.id)), - order_list.index(('pre_begin', child.id))) - self.assertLess(order_list.index(('pre_begin', child.id)), - order_list.index(('pre_begin', grandchild.id))) - - self.assertGreater(order_list.index(('pre_finish', parent.id)), - order_list.index(('pre_finish', child.id))) - self.assertGreater(order_list.index(('pre_finish', child.id)), - order_list.index(('pre_finish', grandchild.id))) - - self.assertGreater(order_list.index(('complete', parent.id)), - 
order_list.index(('complete', child.id))) - self.assertGreater(order_list.index(('complete', child.id)), - order_list.index(('complete', grandchild.id))) - - for resource_id in (parent.id, child.id, grandchild.id): - self.assertLess(order_list.index(('pre_begin', resource_id)), - order_list.index(('pre_finish', resource_id))) - self.assertLess(order_list.index(('pre_finish', resource_id)), - order_list.index(('main', resource_id))) - self.assertLess(order_list.index(('main', resource_id)), - order_list.index(('complete', resource_id))) diff --git a/karbor/tests/unit/protection/test_s3_bank_plugin.py b/karbor/tests/unit/protection/test_s3_bank_plugin.py deleted file mode 100644 index ad9e9425..00000000 --- a/karbor/tests/unit/protection/test_s3_bank_plugin.py +++ /dev/null @@ -1,99 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import math -import time -from unittest import mock - -from oslo_config import cfg -from oslo_utils import importutils - -from karbor.services.protection.clients import s3 -from karbor.tests import base -from karbor.tests.unit.protection.fake_s3_client import FakeS3Client - -CONF = cfg.CONF - - -class FakeConf(object): - def __init__(self): - super(FakeConf, self).__init__() - self.lease_expire_window = 600 - self.lease_renew_window = 120 - self.lease_validity_window = 100 - - -class S3BankPluginTest(base.TestCase): - def setUp(self): - super(S3BankPluginTest, self).setUp() - self.conf = FakeConf() - self.fake_connection = FakeS3Client.connection() - s3.create = mock.MagicMock() - s3.create.return_value = self.fake_connection - import_str = ( - "karbor.services.protection.bank_plugins." 
- "s3_bank_plugin.S3BankPlugin" - ) - self.object_bucket = "objects" - s3_bank_plugin_cls = importutils.import_class( - import_str=import_str) - - self.s3_bank_plugin = s3_bank_plugin_cls(CONF, None) - - def test_acquire_lease(self): - self.s3_bank_plugin.acquire_lease() - expire_time = math.floor(time.time()) + self.conf.lease_expire_window - self.assertEqual(self.s3_bank_plugin.lease_expire_time, expire_time) - - def test_renew_lease(self): - self.s3_bank_plugin.acquire_lease() - expire_time = math.floor(time.time()) + self.conf.lease_expire_window - self.assertEqual(self.s3_bank_plugin.lease_expire_time, expire_time) - time.sleep(5) - self.s3_bank_plugin.acquire_lease() - expire_time = math.floor(time.time()) + self.conf.lease_expire_window - self.assertEqual(self.s3_bank_plugin.lease_expire_time, expire_time) - - def test_check_lease_validity(self): - self.s3_bank_plugin.acquire_lease() - expire_time = math.floor(time.time()) + self.conf.lease_expire_window - self.assertEqual(self.s3_bank_plugin.lease_expire_time, expire_time) - is_valid = self.s3_bank_plugin.check_lease_validity() - self.assertEqual(is_valid, True) - - def test_delete_object(self): - self.s3_bank_plugin.update_object("key", "value") - self.s3_bank_plugin.delete_object("key") - object_list = self.s3_bank_plugin.list_objects() - self.assertEqual('key' in object_list, False) - - def test_get_object(self): - self.s3_bank_plugin.update_object("key", "value") - value = self.s3_bank_plugin.get_object("key") - self.assertEqual(value, "value") - - def test_list_objects(self): - self.s3_bank_plugin.update_object("key-1", "value-1") - self.s3_bank_plugin.update_object("key-2", "value-2") - objects = self.s3_bank_plugin.list_objects(prefix=None) - self.assertEqual(len(objects), 2) - - def test_update_object(self): - self.s3_bank_plugin.update_object("key-1", "value-1") - self.s3_bank_plugin.update_object("key-1", "value-2") - contents = self.s3_bank_plugin.get_object('key-1') - self.assertEqual(contents, "value-2") - - def test_create_get_dict_object(self): - self.s3_bank_plugin.update_object("dict_object", {"key": "value"}) - value = self.s3_bank_plugin.get_object("dict_object") - self.assertEqual(value, {"key": "value"}) diff --git a/karbor/tests/unit/protection/test_swift_bank_plugin.py b/karbor/tests/unit/protection/test_swift_bank_plugin.py deleted file mode 100644 index f3eedaaf..00000000 --- a/karbor/tests/unit/protection/test_swift_bank_plugin.py +++ /dev/null @@ -1,106 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import math -import os -import time -from unittest import mock - -from oslo_config import cfg -from oslo_utils import importutils - -from karbor.services.protection.clients import swift -from karbor.tests import base -from karbor.tests.unit.protection.fake_swift_client import FakeSwiftClient - -CONF = cfg.CONF - - -class FakeConf(object): - def __init__(self): - super(FakeConf, self).__init__() - self.lease_expire_window = 600 - self.lease_renew_window = 120 - self.lease_validity_window = 100 - - -class SwiftBankPluginTest(base.TestCase): - def setUp(self): - super(SwiftBankPluginTest, self).setUp() - self.conf = FakeConf() - self.fake_connection = FakeSwiftClient.connection() - swift.create = mock.MagicMock() - swift.create.return_value = self.fake_connection - import_str = ( - "karbor.services.protection.bank_plugins." - "swift_bank_plugin.SwiftBankPlugin" - ) - self.object_container = "objects" - swift_bank_plugin_cls = importutils.import_class( - import_str=import_str) - - self.swift_bank_plugin = swift_bank_plugin_cls(CONF, None) - - def test_acquire_lease(self): - self.swift_bank_plugin.acquire_lease() - expire_time = math.floor(time.time()) + self.conf.lease_expire_window - self.assertEqual(self.swift_bank_plugin.lease_expire_time, expire_time) - - def test_renew_lease(self): - self.swift_bank_plugin.acquire_lease() - expire_time = math.floor(time.time()) + self.conf.lease_expire_window - self.assertEqual(self.swift_bank_plugin.lease_expire_time, expire_time) - time.sleep(5) - self.swift_bank_plugin.acquire_lease() - expire_time = math.floor(time.time()) + self.conf.lease_expire_window - self.assertEqual(self.swift_bank_plugin.lease_expire_time, expire_time) - - def test_check_lease_validity(self): - self.swift_bank_plugin.acquire_lease() - expire_time = math.floor(time.time()) + self.conf.lease_expire_window - self.assertEqual(self.swift_bank_plugin.lease_expire_time, expire_time) - is_valid = self.swift_bank_plugin.check_lease_validity() - self.assertTrue(is_valid) - - def test_delete_object(self): - self.swift_bank_plugin.update_object("key", "value") - self.swift_bank_plugin.delete_object("key") - object_file = os.path.join(self.fake_connection.swiftdir, - "karbor", - "key") - self.assertFalse(os.path.isfile(object_file)) - - def test_get_object(self): - self.swift_bank_plugin.update_object("key", "value") - value = self.swift_bank_plugin.get_object("key") - self.assertEqual("value", value) - - def test_list_objects(self): - self.swift_bank_plugin.update_object("key-1", "value-1") - self.swift_bank_plugin.update_object("key-2", "value-2") - objects = self.swift_bank_plugin.list_objects(prefix=None) - self.assertEqual(2, len(objects)) - - def test_update_object(self): - self.swift_bank_plugin.update_object("key-1", "value-1") - self.swift_bank_plugin.update_object("key-1", "value-2") - object_file = os.path.join(self.fake_connection.swiftdir, - "karbor", - "key-1") - with open(object_file, "r") as f: - contents = f.read() - self.assertEqual("value-2", contents) - - def test_create_get_dict_object(self): - self.swift_bank_plugin.update_object("dict_object", {"key": "value"}) - value = self.swift_bank_plugin.get_object("dict_object") - self.assertEqual({"key": "value"}, value) diff --git a/karbor/tests/unit/protection/test_workflow.py b/karbor/tests/unit/protection/test_workflow.py deleted file mode 100644 index 9dbea921..00000000 --- a/karbor/tests/unit/protection/test_workflow.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); 
you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from karbor.services.protection.flows import workflow -from karbor.tests import base - - -def fake_func(): - return True - - -class WorkFlowTest(base.TestCase): - def setUp(self): - super(WorkFlowTest, self).setUp() - self.workflow_engine = workflow.TaskFlowEngine() - - def test_create_task(self): - test_task = self.workflow_engine.create_task(fake_func, name='fake') - self.assertEqual('fake', test_task.name) - - def test_add_task(self): - test_flow = self.workflow_engine.build_flow('test') - test_task = self.workflow_engine.create_task(fake_func, name='fake') - self.workflow_engine.add_tasks(test_flow, test_task) - self.assertEqual(1, len(test_flow)) - - def test_search_task(self): - flow = self.workflow_engine.build_flow('test') - task1 = self.workflow_engine.create_task(fake_func, name='fake_func') - task2 = self.workflow_engine.create_task(fake_func, name='fake_func2') - self.workflow_engine.add_tasks(flow, task1, task2) - result = self.workflow_engine.search_task(flow, 'fake_func2') - self.assertEqual('fake_func2', getattr(result, 'name')) diff --git a/karbor/tests/unit/test_cmd.py b/karbor/tests/unit/test_cmd.py deleted file mode 100644 index f0042d38..00000000 --- a/karbor/tests/unit/test_cmd.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
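The test_workflow.py tests above exercise karbor's TaskFlowEngine, a wrapper over the taskflow library (taskflow appears in lower-constraints.txt below). A minimal sketch of the equivalent plain-taskflow usage, assuming taskflow is installed (FakeTask is a made-up name):

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    class FakeTask(task.Task):
        def execute(self):
            return True

    # build_flow/add_tasks in the wrapper reduce to Flow() and add().
    flow = linear_flow.Flow('test')
    flow.add(FakeTask(name='fake_func'), FakeTask(name='fake_func2'))
    engines.run(flow)  # executes the tasks in declaration order
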
- -import sys - -try: - from unittest import mock -except ImportError: - import mock -from oslo_config import cfg -from oslo_db import exception as db_exc - -from karbor.cmd import api as karbor_api -from karbor.cmd import manage as karbor_manage -from karbor.tests import base -from karbor import version - -CONF = cfg.CONF - - -class TestKarborApiCmd(base.TestCase): - """Unit test cases for python modules under karbor/cmd.""" - - def setUp(self): - super(TestKarborApiCmd, self).setUp() - sys.argv = ['karbor-api'] - CONF(sys.argv[1:], project='karbor', version=version.version_string()) - - def tearDown(self): - super(TestKarborApiCmd, self).tearDown() - - @mock.patch('karbor.service.WSGIService') - @mock.patch('karbor.service.process_launcher') - @mock.patch('karbor.rpc.init') - @mock.patch('oslo_log.log.setup') - def test_main(self, log_setup, rpc_init, process_launcher, - wsgi_service): - launcher = process_launcher.return_value - server = wsgi_service.return_value - server.workers = mock.sentinel.worker_count - - karbor_api.main() - - self.assertEqual('karbor', CONF.project) - self.assertEqual(CONF.version, version.version_string()) - log_setup.assert_called_once_with(CONF, "karbor") - rpc_init.assert_called_once_with(CONF) - process_launcher.assert_called_once_with() - wsgi_service.assert_called_once_with('osapi_karbor') - launcher.launch_service.assert_called_once_with(server, - workers=server.workers) - launcher.wait.assert_called_once_with() - - -class TestKarborManageCmd(base.TestCase): - """Unit test cases for python modules under karbor/cmd/manage.py.""" - - def setUp(self): - super(TestKarborManageCmd, self).setUp() - sys.argv = ['karbor-manage'] - CONF(sys.argv[1:], project='karbor', version=version.version_string()) - - def tearDown(self): - super(TestKarborManageCmd, self).tearDown() - - def test_db_commands_upgrade_out_of_range(self): - version = 1111111111 - db_cmds = karbor_manage.DbCommands() - exit = self.assertRaises(SystemExit, db_cmds.sync, version + 1) - self.assertEqual(1, exit.code) - - @mock.patch("oslo_db.sqlalchemy.migration.db_sync") - def test_db_commands_script_not_present(self, db_sync): - db_sync.side_effect = db_exc.DBMigrationError(None) - db_cmds = karbor_manage.DbCommands() - exit = self.assertRaises(SystemExit, db_cmds.sync, 101) - self.assertEqual(1, exit.code) diff --git a/karbor/tests/unit/test_context.py b/karbor/tests/unit/test_context.py deleted file mode 100644 index 8302b577..00000000 --- a/karbor/tests/unit/test_context.py +++ /dev/null @@ -1,94 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
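test_cmd.py above leans on stacked mock.patch decorators; a common trip-up is that they apply bottom-up, so the mock for the decorator nearest the function arrives as the first positional argument (which is why test_main receives log_setup first and wsgi_service last). A self-contained illustration:

    from unittest import mock

    @mock.patch('os.getcwd')    # outermost decorator -> second argument
    @mock.patch('os.listdir')   # innermost decorator -> first argument
    def demo(mock_listdir, mock_getcwd):
        mock_listdir.return_value = []
        mock_getcwd.return_value = '/tmp'
        import os
        assert os.listdir() == [] and os.getcwd() == '/tmp'

    demo()
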
- -from karbor import context -from karbor.tests import base - - -class ContextTestCase(base.TestCase): - - def test_request_context_sets_is_admin(self): - ctxt = context.RequestContext('111', - '222', - roles=['admin', 'weasel']) - self.assertTrue(ctxt.is_admin) - - def test_request_context_sets_is_admin_upcase(self): - ctxt = context.RequestContext('111', - '222', - roles=['Admin', 'weasel']) - self.assertTrue(ctxt.is_admin) - - def test_request_context_read_deleted(self): - ctxt = context.RequestContext('111', - '222', - read_deleted='yes') - self.assertEqual('yes', ctxt.read_deleted) - - ctxt.read_deleted = 'no' - self.assertEqual('no', ctxt.read_deleted) - - def test_request_context_read_deleted_invalid(self): - self.assertRaises(ValueError, - context.RequestContext, - '111', - '222', - read_deleted=True) - - ctxt = context.RequestContext('111', '222') - self.assertRaises(ValueError, - setattr, - ctxt, - 'read_deleted', - True) - - def test_request_context_elevated(self): - user_context = context.RequestContext( - 'fake_user', 'fake_project', is_admin=False) - self.assertFalse(user_context.is_admin) - admin_context = user_context.elevated() - self.assertFalse(user_context.is_admin) - self.assertTrue(admin_context.is_admin) - self.assertNotIn('admin', user_context.roles) - self.assertIn('admin', admin_context.roles) - - def test_service_catalog_nova_and_swift(self): - service_catalog = [ - {u'type': u'compute', u'name': u'nova'}, - {u'type': u's3', u'name': u's3'}, - {u'type': u'image', u'name': u'glance'}, - {u'type': u'volume', u'name': u'cinder'}, - {u'type': u'ec2', u'name': u'ec2'}, - {u'type': u'object-store', u'name': u'swift'}, - {u'type': u'identity', u'name': u'keystone'}, - {u'type': None, u'name': u'S_withtypeNone'}, - {u'type': u'co', u'name': u'S_partofcompute'}] - - compute_catalog = [{u'type': u'compute', u'name': u'nova'}] - object_catalog = [{u'name': u'swift', u'type': u'object-store'}] - ctxt = context.RequestContext('111', '222', - service_catalog=service_catalog) - self.assertEqual(5, len(ctxt.service_catalog)) - return_compute = [v for v in ctxt.service_catalog if - v['type'] == u'compute'] - return_object = [v for v in ctxt.service_catalog if - v['type'] == u'object-store'] - self.assertEqual(compute_catalog, return_compute) - self.assertEqual(object_catalog, return_object) - - def test_user_identity(self): - ctx = context.RequestContext("user", "tenant", - domain="domain", - user_domain="user-domain", - project_domain="project-domain") - self.assertEqual('user tenant domain user-domain project-domain', - ctx.to_dict()["user_identity"]) diff --git a/karbor/tests/unit/test_exception.py b/karbor/tests/unit/test_exception.py deleted file mode 100644 index 7d85662c..00000000 --- a/karbor/tests/unit/test_exception.py +++ /dev/null @@ -1,106 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
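The service-catalog test in test_context.py above implies that the deleted RequestContext filters the catalog down to exact matches on a small set of service types: five entries survive, while the None type and the partial type 'co' are dropped. A sketch of that filtering, with the kept set inferred from the assertions rather than taken from the deleted code:

    WANTED_TYPES = ('identity', 'compute', 'image', 'volume', 'object-store')

    def filter_service_catalog(catalog):
        # Exact type matches only: drops None types and near-misses
        # such as 'co', matching what the test asserts.
        return [entry for entry in catalog
                if entry.get('type') in WANTED_TYPES]
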
- -from unittest import mock - -import six -from six.moves import http_client -import webob.util - -from karbor import exception -from karbor.tests import base - - -class KarborExceptionTestCase(base.TestCase): - def test_default_error_msg(self): - class FakeKarborException(exception.KarborException): - message = "default message" - - exc = FakeKarborException() - self.assertEqual('default message', six.text_type(exc)) - - def test_error_msg(self): - self.assertEqual('test', - six.text_type(exception.KarborException('test'))) - - def test_default_error_msg_with_kwargs(self): - class FakeKarborException(exception.KarborException): - message = "default message: %(code)s" - - exc = FakeKarborException(code=500) - self.assertEqual('default message: 500', six.text_type(exc)) - - def test_error_msg_exception_with_kwargs(self): - # NOTE(dprince): disable format errors for this test - self.flags(fatal_exception_format_errors=False) - - class FakeKarborException(exception.KarborException): - message = "default message: %(misspelled_code)s" - - exc = FakeKarborException(code=http_client.INTERNAL_SERVER_ERROR) - self.assertEqual('default message: %(misspelled_code)s', - six.text_type(exc)) - - def test_default_error_code(self): - class FakeKarborException(exception.KarborException): - code = http_client.NOT_FOUND - - exc = FakeKarborException() - self.assertEqual(http_client.NOT_FOUND, exc.kwargs['code']) - - def test_error_code_from_kwarg(self): - class FakeKarborException(exception.KarborException): - code = http_client.INTERNAL_SERVER_ERROR - - exc = FakeKarborException(code=http_client.NOT_FOUND) - self.assertEqual(http_client.NOT_FOUND, exc.kwargs['code']) - - def test_error_msg_is_exception_to_string(self): - msg = 'test message' - exc1 = Exception(msg) - exc2 = exception.KarborException(exc1) - self.assertEqual(msg, exc2.msg) - - def test_message_in_format_string(self): - class FakeKarborException(exception.KarborException): - message = 'FakeKarborException: %(message)s' - - exc = FakeKarborException(message='message') - self.assertEqual('message', six.text_type(exc)) - - def test_message_and_kwarg_in_format_string(self): - class FakeKarborException(exception.KarborException): - message = 'Error %(code)d: %(msg)s' - - exc = FakeKarborException(code=http_client.NOT_FOUND, msg='message') - self.assertEqual('Error 404: message', six.text_type(exc)) - - -class KarborConvertedExceptionTestCase(base.TestCase): - def test_default_args(self): - exc = exception.ConvertedException() - self.assertNotEqual('', exc.title) - self.assertEqual(http_client.INTERNAL_SERVER_ERROR, exc.code) - self.assertEqual('', exc.explanation) - - def test_standard_status_code(self): - with mock.patch.dict(webob.util.status_reasons, {200: 'reason'}): - exc = exception.ConvertedException(code=200) - self.assertEqual('reason', exc.title) - - @mock.patch.dict(webob.util.status_reasons, - {http_client.INTERNAL_SERVER_ERROR: 'reason'}) - def test_generic_status_code(self): - with mock.patch.dict(webob.util.status_generic_reasons, - {5: 'generic_reason'}): - exc = exception.ConvertedException(code=599) - self.assertEqual('generic_reason', exc.title) diff --git a/karbor/tests/unit/test_policy.py b/karbor/tests/unit/test_policy.py deleted file mode 100644 index 1bc05360..00000000 --- a/karbor/tests/unit/test_policy.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. 
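test_exception.py above documents a %-formatting convention for exception messages: the class-level message template is rendered with the constructor kwargs, a class-level code is injected as a default kwarg, and a malformed template falls back to the raw string (unless fatal_exception_format_errors is set). A condensed sketch of that convention, not the deleted KarborException itself:

    class FakeException(Exception):
        message = "default message: %(code)s"
        code = 500

        def __init__(self, message=None, **kwargs):
            self.kwargs = kwargs
            # Class default applies only when no code kwarg was passed.
            self.kwargs.setdefault('code', self.code)
            if not message:
                try:
                    message = self.message % self.kwargs
                except KeyError:
                    # Misspelled kwargs: fall back to the raw template.
                    message = self.message
            self.msg = message
            super().__init__(message)
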
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os.path - -from oslo_config import cfg -from oslo_config import fixture as config_fixture -from oslo_policy import policy as oslo_policy - -from karbor import context -from karbor import exception -from karbor.tests import base -from karbor import utils - -from karbor import policy - -CONF = cfg.CONF - - -class PolicyFileTestCase(base.TestCase): - - def setUp(self): - super(PolicyFileTestCase, self).setUp() - self.context = context.get_admin_context() - self.target = {} - self.fixture = self.useFixture(config_fixture.Config(CONF)) - self.addCleanup(policy.reset) - - def test_modified_policy_reloads(self): - with utils.tempdir() as tmpdir: - tmpfilename = os.path.join(tmpdir, 'policy') - self.fixture.config(policy_file=tmpfilename, group='oslo_policy') - policy.reset() - policy.init() - rule = oslo_policy.RuleDefault('example:test', "") - policy._ENFORCER.register_defaults([rule]) - - action = "example:test" - with open(tmpfilename, "w") as policyfile: - policyfile.write('{"example:test": ""}') - policy.authorize(self.context, action, self.target) - with open(tmpfilename, "w") as policyfile: - policyfile.write('{"example:test": "!"}') - policy._ENFORCER.load_rules(True) - self.assertRaises(exception.PolicyNotAuthorized, - policy.authorize, - self.context, action, self.target) - - -class PolicyTestCase(base.TestCase): - - def setUp(self): - super(PolicyTestCase, self).setUp() - rules = [ - oslo_policy.RuleDefault("true", '@'), - oslo_policy.RuleDefault("test:allowed", '@'), - oslo_policy.RuleDefault("test:denied", "!"), - oslo_policy.RuleDefault("test:my_file", - "role:compute_admin or " - "project_id:%(project_id)s"), - oslo_policy.RuleDefault("test:early_and_fail", "! and @"), - oslo_policy.RuleDefault("test:early_or_success", "@ or !"), - oslo_policy.RuleDefault("test:lowercase_admin", - "role:admin"), - oslo_policy.RuleDefault("test:uppercase_admin", - "role:ADMIN"), - ] - policy.reset() - policy.init() - # before a policy rule can be used, its default has to be registered. 
- policy._ENFORCER.register_defaults(rules) - self.context = context.RequestContext('fake', 'fake', roles=['member']) - self.target = {} - self.addCleanup(policy.reset) - - def test_authorize_nonexistent_action_throws(self): - action = "test:noexist" - self.assertRaises(oslo_policy.PolicyNotRegistered, policy.authorize, - self.context, action, self.target) - - def test_authorize_bad_action_throws(self): - action = "test:denied" - self.assertRaises(exception.PolicyNotAuthorized, policy.authorize, - self.context, action, self.target) - - def test_authorize_bad_action_noraise(self): - action = "test:denied" - result = policy.authorize(self.context, action, self.target, False) - self.assertFalse(result) - - def test_authorize_good_action(self): - action = "test:allowed" - result = policy.authorize(self.context, action, self.target) - self.assertTrue(result) - - def test_templatized_authorization(self): - target_mine = {'project_id': 'fake'} - target_not_mine = {'project_id': 'another'} - action = "test:my_file" - policy.authorize(self.context, action, target_mine) - self.assertRaises(exception.PolicyNotAuthorized, policy.authorize, - self.context, action, target_not_mine) - - def test_early_AND_authorization(self): - action = "test:early_and_fail" - self.assertRaises(exception.PolicyNotAuthorized, policy.authorize, - self.context, action, self.target) - - def test_early_OR_authorization(self): - action = "test:early_or_success" - policy.authorize(self.context, action, self.target) - - def test_ignore_case_role_check(self): - lowercase_action = "test:lowercase_admin" - uppercase_action = "test:uppercase_admin" - admin_context = context.RequestContext('admin', - 'fake', - roles=['AdMiN']) - policy.authorize(admin_context, lowercase_action, self.target) - policy.authorize(admin_context, uppercase_action, self.target) diff --git a/karbor/tests/unit/test_rpc.py b/karbor/tests/unit/test_rpc.py deleted file mode 100644 index e7c3e0c8..00000000 --- a/karbor/tests/unit/test_rpc.py +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
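test_policy.py above uses oslo.policy's registered-defaults workflow: a rule must be registered on the Enforcer before authorize() will accept it, '@' always passes, and '!' always fails. A minimal standalone version, assuming oslo.policy is installed:

    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy

    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.register_defaults([
        oslo_policy.RuleDefault('test:allowed', '@'),  # always passes
        oslo_policy.RuleDefault('test:denied', '!'),   # always fails
    ])

    creds = {'roles': ['member'], 'project_id': 'fake'}
    assert enforcer.authorize('test:allowed', {}, creds)
    # authorize() raises PolicyNotRegistered for rules never registered,
    # which is what test_authorize_nonexistent_action_throws checks.
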
- -from unittest import mock - -from oslo_config import cfg - -from karbor import rpc -from karbor.tests import base - - -CONF = cfg.CONF - - -class RPCAPITestCase(base.TestCase): - """Tests RPCAPI mixin aggregating stuff related to RPC compatibility.""" - - def setUp(self): - super(RPCAPITestCase, self).setUp() - - @mock.patch('oslo_messaging.JsonPayloadSerializer', wraps=True) - def test_init_no_notifications(self, serializer_mock): - """Test short-circuiting notifications with default and noop driver.""" - driver = ['noop'] - self.override_config('driver', driver, - group='oslo_messaging_notifications') - rpc.init(CONF) - self.assertEqual(rpc.utils.DO_NOTHING, rpc.NOTIFIER) - serializer_mock.assert_not_called() - - @mock.patch.object(rpc, 'messaging') - def test_init_notifications(self, messaging_mock): - self.override_config('driver', ['test'], - group='oslo_messaging_notifications') - rpc.init(CONF) - self.assertTrue(messaging_mock.JsonPayloadSerializer.called) - self.assertTrue(messaging_mock.Notifier.called) - self.assertEqual(rpc.NOTIFIER, messaging_mock.Notifier.return_value) diff --git a/karbor/tests/unit/test_service.py b/karbor/tests/unit/test_service.py deleted file mode 100644 index 7e15a736..00000000 --- a/karbor/tests/unit/test_service.py +++ /dev/null @@ -1,302 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
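test_rpc.py above asserts a short-circuit: when only the noop notification driver is configured, rpc.init skips serializer and Notifier construction entirely. A sketch of that wiring under the same assumption (init_notifier and the None stand-in are illustrative names, not karbor's API):

    import oslo_messaging as messaging

    def init_notifier(conf):
        drivers = set(conf.oslo_messaging_notifications.driver)
        if not drivers or drivers == {'noop'}:
            return None  # plays the role of utils.DO_NOTHING
        transport = messaging.get_notification_transport(conf)
        return messaging.Notifier(
            transport, serializer=messaging.JsonPayloadSerializer())
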
- -""" -Unit Tests for remote procedure calls using queue -""" - -from unittest import mock - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_db import exception as db_exc - -from karbor import context -from karbor import db -from karbor import exception -from karbor import manager -from karbor import rpc -from karbor import service -from karbor.tests import base - - -test_service_opts = [ - cfg.StrOpt("fake_manager", - default="karbor.tests.unit.test_service.FakeManager", - help="Manager for testing"), - cfg.StrOpt("test_service_listen", - help="Host to bind test service to"), - cfg.PortOpt("test_service_listen_port", - default=8799, - help="Port number to bind test service to"), ] - -CONF = cfg.CONF -CONF.register_opts(test_service_opts) - - -class FakeManager(manager.Manager): - """Fake manager for tests.""" - def __init__(self, host=None, - db_driver=None, service_name=None): - super(FakeManager, self).__init__(host=host, - db_driver=db_driver) - - def test_method(self): - return 'manager' - - -class ExtendedService(service.Service): - def test_method(self): - return 'service' - - -class ServiceManagerTestCase(base.TestCase): - """Test cases for Services.""" - - def test_message_gets_to_manager(self): - serv = service.Service('test', - 'test', - 'test', - 'karbor.tests.unit.test_service.FakeManager') - serv.start() - self.assertEqual('manager', serv.test_method()) - - def test_override_manager_method(self): - serv = ExtendedService('test', - 'test', - 'test', - 'karbor.tests.unit.test_service.FakeManager') - serv.start() - self.assertEqual('service', serv.test_method()) - - -class ServiceFlagsTestCase(base.TestCase): - def test_service_enabled_on_create_based_on_flag(self): - self.flags(enable_new_services=True) - host = 'foo' - binary = 'karbor-fake' - app = service.Service.create(host=host, binary=binary) - app.start() - app.stop() - ref = db.service_get(context.get_admin_context(), app.service_id) - db.service_destroy(context.get_admin_context(), app.service_id) - self.assertFalse(ref['disabled']) - - def test_service_disabled_on_create_based_on_flag(self): - self.flags(enable_new_services=False) - host = 'foo' - binary = 'karbor-fake' - app = service.Service.create(host=host, binary=binary) - app.start() - app.stop() - ref = db.service_get(context.get_admin_context(), app.service_id) - db.service_destroy(context.get_admin_context(), app.service_id) - self.assertTrue(ref['disabled']) - - -class ServiceTestCase(base.TestCase): - """Test cases for Services.""" - - def setUp(self): - super(ServiceTestCase, self).setUp() - self.host = 'foo' - self.binary = 'karbor-fake' - self.topic = 'fake' - - def test_create(self): - app = service.Service.create(host=self.host, - binary=self.binary, - topic=self.topic) - - self.assertTrue(app) - - def test_report_state_newly_disconnected(self): - service_ref = {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0, - 'id': 1} - with mock.patch.object(service, 'db') as mock_db: - mock_db.service_get_by_args.side_effect = exception.NotFound() - mock_db.service_create.return_value = service_ref - mock_db.service_get.side_effect = db_exc.DBConnectionError() - - serv = service.Service( - self.host, - self.binary, - self.topic, - 'karbor.tests.unit.test_service.FakeManager' - ) - serv.start() - serv.report_state() - self.assertTrue(serv.model_disconnected) - self.assertFalse(mock_db.service_update.called) - - def test_report_state_disconnected_DBError(self): - service_ref = {'host': 
self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0, - 'id': 1} - with mock.patch.object(service, 'db') as mock_db: - mock_db.service_get_by_args.side_effect = exception.NotFound() - mock_db.service_create.return_value = service_ref - mock_db.service_get.side_effect = db_exc.DBError() - - serv = service.Service( - self.host, - self.binary, - self.topic, - 'karbor.tests.unit.test_service.FakeManager' - ) - serv.start() - serv.report_state() - self.assertTrue(serv.model_disconnected) - self.assertFalse(mock_db.service_update.called) - - def test_report_state_newly_connected(self): - service_ref = {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0, - 'id': 1} - with mock.patch.object(service, 'db') as mock_db: - mock_db.service_get_by_args.side_effect = exception.NotFound() - mock_db.service_create.return_value = service_ref - mock_db.service_get.return_value = service_ref - - serv = service.Service( - self.host, - self.binary, - self.topic, - 'karbor.tests.unit.test_service.FakeManager' - ) - serv.start() - serv.model_disconnected = True - serv.report_state() - - self.assertFalse(serv.model_disconnected) - self.assertTrue(mock_db.service_update.called) - - def test_report_state_manager_not_working(self): - service_ref = {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0, - 'id': 1} - with mock.patch('karbor.db') as mock_db: - mock_db.service_get.return_value = service_ref - - serv = service.Service( - self.host, - self.binary, - self.topic, - 'karbor.tests.unit.test_service.FakeManager' - ) - serv.manager.is_working = mock.Mock(return_value=False) - serv.start() - serv.report_state() - - serv.manager.is_working.assert_called_once_with() - self.assertFalse(mock_db.service_update.called) - - def test_service_with_long_report_interval(self): - self.override_config('service_down_time', 10) - self.override_config('report_interval', 10) - service.Service.create( - binary="test_service", - manager="karbor.tests.unit.test_service.FakeManager") - self.assertEqual(25, CONF.service_down_time) - - @mock.patch.object(rpc, 'get_server') - @mock.patch('karbor.db') - def test_service_stop_waits_for_rpcserver(self, mock_db, mock_rpc): - serv = service.Service( - self.host, - self.binary, - self.topic, - 'karbor.tests.unit.test_service.FakeManager' - ) - serv.start() - serv.stop() - serv.wait() - serv.rpcserver.start.assert_called_once_with() - serv.rpcserver.stop.assert_called_once_with() - serv.rpcserver.wait.assert_called_once_with() - - -class TestWSGIService(base.TestCase): - - def setUp(self): - super(TestWSGIService, self).setUp() - - @mock.patch('oslo_service.wsgi.Loader') - def test_service_random_port(self, mock_loader): - test_service = service.WSGIService("test_service") - self.assertEqual(8799, test_service.port) - test_service.start() - self.assertNotEqual(0, test_service.port) - test_service.stop() - self.assertTrue(mock_loader.called) - - @mock.patch('oslo_service.wsgi.Loader') - def test_reset_pool_size_to_default(self, mock_loader): - test_service = service.WSGIService("test_service") - test_service.start() - - # Stopping the service, which in turn sets pool size to 0 - test_service.stop() - self.assertEqual(0, test_service.server._pool.size) - - # Resetting pool size to default - test_service.reset() - test_service.start() - self.assertEqual(cfg.CONF.wsgi_default_pool_size, - test_service.server._pool.size) - self.assertTrue(mock_loader.called) - - @mock.patch('oslo_service.wsgi.Loader') - 
def test_workers_set_default(self, mock_loader): - self.override_config('osapi_karbor_listen_port', - CONF.test_service_listen_port) - test_service = service.WSGIService("osapi_karbor") - self.assertEqual(processutils.get_worker_count(), - test_service.workers) - self.assertTrue(mock_loader.called) - - @mock.patch('oslo_service.wsgi.Loader') - def test_workers_set_good_user_setting(self, mock_loader): - self.override_config('osapi_karbor_listen_port', - CONF.test_service_listen_port) - self.override_config('osapi_karbor_workers', 8) - test_service = service.WSGIService("osapi_karbor") - self.assertEqual(8, test_service.workers) - self.assertTrue(mock_loader.called) - - @mock.patch('oslo_service.wsgi.Loader') - def test_workers_set_zero_user_setting(self, mock_loader): - self.override_config('osapi_karbor_listen_port', - CONF.test_service_listen_port) - self.override_config('osapi_karbor_workers', 0) - test_service = service.WSGIService("osapi_karbor") - # If a value less than 1 is used, defaults to number of procs - # available - self.assertEqual(processutils.get_worker_count(), - test_service.workers) - self.assertTrue(mock_loader.called) - - @mock.patch('oslo_service.wsgi.Loader') - def test_workers_set_negative_user_setting(self, mock_loader): - self.override_config('osapi_karbor_workers', -1) - self.assertRaises(exception.InvalidInput, - service.WSGIService, "osapi_karbor") - self.assertTrue(mock_loader.called) diff --git a/karbor/tests/unit/test_utils.py b/karbor/tests/unit/test_utils.py deleted file mode 100644 index d0fc2b5e..00000000 --- a/karbor/tests/unit/test_utils.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from karbor.tests import base - -from karbor import utils - - -class WalkClassHierarchyTestCase(base.TestCase): - def test_walk_class_hierarchy(self): - class A(object): - pass - - class B(A): - pass - - class C(A): - pass - - class D(B): - pass - - class E(A): - pass - - class_pairs = zip((D, B, E), - utils.walk_class_hierarchy(A, encountered=[C])) - for actual, expected in class_pairs: - self.assertEqual(expected, actual) - - class_pairs = zip((D, B, C, E), utils.walk_class_hierarchy(A)) - for actual, expected in class_pairs: - self.assertEqual(expected, actual) diff --git a/karbor/tests/unit/wsgi/__init__.py b/karbor/tests/unit/wsgi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/utils.py b/karbor/utils.py deleted file mode 100644 index f5f4f043..00000000 --- a/karbor/utils.py +++ /dev/null @@ -1,215 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utilities and helper functions.""" -import ast -import contextlib -import os -import shutil -import six -import tempfile -import webob.exc - -from keystoneclient import discover as ks_discover -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils -from oslo_utils import strutils -from oslo_utils import timeutils - -from karbor import exception -from karbor.i18n import _ -from stevedore import driver - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def find_config(config_path): - """Find a configuration file using the given hint. - - :param config_path: Full or relative path to the config. - :returns: Full path of the config, if it exists. - :raises karbor.exception.ConfigNotFound: - - """ - possible_locations = [ - config_path, - os.path.join("/var/lib/karbor", "etc", "karbor", config_path), - os.path.join("/var/lib/karbor", "etc", config_path), - os.path.join("/var/lib/karbor", config_path), - "/etc/karbor/%s" % config_path, - ] - - for path in possible_locations: - if os.path.exists(path): - return os.path.abspath(path) - - raise exception.ConfigNotFound(path=os.path.abspath(config_path)) - - -def service_is_up(service): - """Check whether a service is up based on last heartbeat.""" - last_heartbeat = service['updated_at'] or service['created_at'] - - elapsed = (timeutils.utcnow() - last_heartbeat).total_seconds() - return abs(elapsed) <= CONF.service_down_time - - -def remove_invalid_filter_options(context, filters, - allowed_search_options): - """Remove search options that are not valid for non-admin API/context.""" - - if context.is_admin: - # Allow all options - return - # Otherwise, strip out all unknown options - unknown_options = [opt for opt in filters - if opt not in allowed_search_options] - bad_options = ", ".join(unknown_options) - LOG.debug("Removing options '%s' from query.", bad_options) - for opt in unknown_options: - del filters[opt] - - -def check_filters(filters): - for k, v in filters.items(): - try: - filters[k] = ast.literal_eval(v) - except (ValueError, SyntaxError): - LOG.debug('Could not evaluate value %s, assuming string', v) - - -def is_valid_boolstr(val): - """Check if the provided string is a valid bool string or not.""" - val = str(val).lower() - return val in ('true', 'false', 'yes', 'no', 'y', 'n', '1', '0') - - -def get_bool_param(param_string, params): - param = params.get(param_string, False) - if not is_valid_boolstr(param): - msg = _('Value %(param)s for %(param_string)s is not a ' - 'boolean.') % {'param': param, 'param_string': param_string} - raise exception.InvalidParameterValue(err=msg) - - return strutils.bool_from_string(param, strict=True) - - -def load_class(namespace, plugin_name): - try: - LOG.debug('Start load plugin %s. ', plugin_name) - # Try to resolve plugin by name - mgr = driver.DriverManager(namespace, plugin_name) - return mgr.driver - except RuntimeError as e1: - # fallback to class name - try: - return importutils.import_class(plugin_name) - except ImportError as e2: - LOG.error("Error loading plugin by name, %s", e1) - LOG.error("Error loading plugin by class, %s", e2) - raise ImportError(_("Class not found.")) - - -def load_plugin(namespace, plugin_name, *args, **kwargs): - plugin_class = load_class(namespace, plugin_name) - return plugin_class(*args, **kwargs) - - -def get_auth_uri(v3=True): - # Look for the keystone auth_uri in the configuration. 
First we - # check the [clients_keystone] section, and if it is not set we - # look in [keystone_authtoken] - if cfg.CONF.clients_keystone.auth_uri: - discover = ks_discover.Discover( - auth_url=cfg.CONF.clients_keystone.auth_uri) - return discover.url_for('3.0') - else: - # Import auth_token to have keystone_authtoken settings setup. - importutils.import_module('keystonemiddleware.auth_token') - auth_uri = cfg.CONF.keystone_authtoken.www_authenticate_uri - return auth_uri.replace('v2.0', 'v3') if auth_uri and v3 else auth_uri - - -def validate_integer(value, name, min_value=None, max_value=None): - """Make sure that value is a valid integer, potentially within range. - - :param value: the value of the integer - :param name: the name of the integer - :param min_length: the min_length of the integer - :param max_length: the max_length of the integer - :returns: integer - """ - try: - value = int(value) - except (TypeError, ValueError, UnicodeEncodeError): - raise webob.exc.HTTPBadRequest(explanation=( - _('%s must be an integer.') % name)) - - if min_value is not None and value < min_value: - raise webob.exc.HTTPBadRequest( - explanation=(_('%(value_name)s must be >= %(min_value)d') % - {'value_name': name, 'min_value': min_value})) - if max_value is not None and value > max_value: - raise webob.exc.HTTPBadRequest( - explanation=(_('%(value_name)s must be <= %(max_value)d') % - {'value_name': name, 'max_value': max_value})) - - return value - - -def walk_class_hierarchy(clazz, encountered=None): - """Walk class hierarchy, yielding most derived classes first.""" - if not encountered: - encountered = [] - for subclass in clazz.__subclasses__(): - if subclass not in encountered: - encountered.append(subclass) - # drill down to leaves first - for subsubclass in walk_class_hierarchy(subclass, encountered): - yield subsubclass - yield subclass - - -@contextlib.contextmanager -def tempdir(**kwargs): - tmpdir = tempfile.mkdtemp(**kwargs) - try: - yield tmpdir - finally: - try: - shutil.rmtree(tmpdir) - except OSError as e: - LOG.debug('Could not remove tmpdir: %s', - six.text_type(e)) - - -class DoNothing(str): - """Class that literally does nothing. - - We inherit from str in case it's called with json.dumps. - """ - def __call__(self, *args, **kwargs): - return self - - def __getattr__(self, name): - return self - - -DO_NOTHING = DoNothing() - - -def notifications_enabled(conf): - """Check if oslo notifications are enabled.""" - notifications_driver = set(conf.oslo_messaging_notifications.driver) - return notifications_driver and notifications_driver != {'noop'} diff --git a/karbor/version.py b/karbor/version.py deleted file mode 100644 index 19d4b28e..00000000 --- a/karbor/version.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
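karbor/utils.py above defines walk_class_hierarchy as a depth-first generator that yields the most derived classes first. A quick usage example against the function as shown:

    class A: ...
    class B(A): ...
    class C(A): ...
    class D(B): ...

    # Leaves come out before their bases, siblings in definition order.
    print([cls.__name__ for cls in walk_class_hierarchy(A)])
    # -> ['D', 'B', 'C']
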
- -from pbr import version as pbr_version - -KARBOR_VENDOR = "OpenStack Foundation" -KARBOR_PRODUCT = "OpenStack Karbor" -KARBOR_PACKAGE = None # OS distro package version suffix - -loaded = False -version_info = pbr_version.VersionInfo('Karbor') -version_string = version_info.version_string diff --git a/karbor/wsgi/__init__.py b/karbor/wsgi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/karbor/wsgi/common.py b/karbor/wsgi/common.py deleted file mode 100644 index 130451b0..00000000 --- a/karbor/wsgi/common.py +++ /dev/null @@ -1,159 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utility methods for working with WSGI servers.""" - -from oslo_config import cfg -from oslo_log import log as logging - -import webob.dec -import webob.exc - -from karbor.i18n import _ - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class Request(webob.Request): - pass - - -class Application(object): - """Base WSGI application wrapper. Subclasses need to implement __call__.""" - - @classmethod - def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config files. - - Any local configuration (that is, values under the [app:APPNAME] - section of the paste config) will be passed into the `__init__` method - as kwargs. - - A hypothetical configuration would look like: - - [app:wadl] - latest_version = 1.3 - paste.app_factory = karbor.api.fancy_api:Wadl.factory - - which would result in a call to the `Wadl` class as - - import karbor.api.fancy_api - fancy_api.Wadl(latest_version='1.3') - - You could of course re-implement the `factory` method in subclasses, - but using the kwarg passing it shouldn't be necessary. - - """ - return cls(**local_config) - - def __call__(self, environ, start_response): - """Subclasses will probably want to implement __call__ like this: - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - # Any of the following objects work as responses: - - # Option 1: simple string - res = 'message\n' - - # Option 2: a nicely formatted HTTP exception page - res = exc.HTTPForbidden(explanation='Nice try') - - # Option 3: a webob Response object (in case you need to play with - # headers, or you want to be treated like an iterable) - res = Response(); - res.app_iter = open('somefile') - - # Option 4: any wsgi app to be run next - res = self.application - - # Option 5: you can get a Response object for a wsgi app, too, to - # play with headers etc - res = req.get_response(self.application) - - # You can then just return your response... - return res - # ... or set req.response and return None. - req.response = res - - See the end of http://pythonpaste.org/webob/modules/dec.html - for more info. - - """ - raise NotImplementedError(_('You must implement __call__')) - - -class Middleware(Application): - """Base WSGI middleware. - - These classes require an application to be - initialized that will be called next. 
By default the middleware will - simply call its wrapped app, or you can override __call__ to customize its - behavior. - - """ - - @classmethod - def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config files. - - Any local configuration (that is, values under the [filter:APPNAME] - section of the paste config) will be passed into the `__init__` method - as kwargs. - - A hypothetical configuration would look like: - - [filter:analytics] - redis_host = 127.0.0.1 - paste.filter_factory = karbor.api.analytics:Analytics.factory - - which would result in a call to the `Analytics` class as - - import karbor.api.analytics - analytics.Analytics(app_from_paste, redis_host='127.0.0.1') - - You could of course re-implement the `factory` method in subclasses, - but using the kwarg passing it shouldn't be necessary. - - """ - def _factory(app): - return cls(app, **local_config) - return _factory - - def __init__(self, application): - super(Middleware, self).__init__() - self.application = application - - def process_request(self, req): - """Called on each request. - - If this returns None, the next application down the stack will be - executed. If it returns a response then that response will be returned - and execution will stop here. - - """ - return None - - def process_response(self, response): - """Do whatever you'd like to the response.""" - return response - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - response = self.process_request(req) - if response: - return response - response = req.get_response(self.application) - return self.process_response(response) diff --git a/karbor/wsgi/eventlet_server.py b/karbor/wsgi/eventlet_server.py deleted file mode 100644 index e8a2c9f3..00000000 --- a/karbor/wsgi/eventlet_server.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Methods for working with eventlet WSGI servers.""" - -from __future__ import print_function - -import socket - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import wsgi -from oslo_utils import netutils - - -socket_opts = [ - cfg.BoolOpt('tcp_keepalive', - default=True, - help="Sets the value of TCP_KEEPALIVE (True/False) for each " - "server socket."), - cfg.IntOpt('tcp_keepalive_interval', - help="Sets the value of TCP_KEEPINTVL in seconds for each " - "server socket. Not supported on OS X."), - cfg.IntOpt('tcp_keepalive_count', - help="Sets the value of TCP_KEEPCNT for each " - "server socket. Not supported on OS X."), -] - - -CONF = cfg.CONF -CONF.register_opts(socket_opts) - -LOG = logging.getLogger(__name__) - - -class Server(wsgi.Server): - """Server class to manage a WSGI server, serving a WSGI application.""" - - def _set_socket_opts(self, _socket): - _socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - # NOTE(praneshp): Call set_tcp_keepalive in oslo to set - # tcp keepalive parameters. 
Sockets can hang around forever - # without keepalive - netutils.set_tcp_keepalive(_socket, - self.conf.tcp_keepalive, - self.conf.tcp_keepidle, - self.conf.tcp_keepalive_count, - self.conf.tcp_keepalive_interval) - - return _socket diff --git a/karbor/wsgi/wsgi.py b/karbor/wsgi/wsgi.py deleted file mode 100644 index 7b1096a8..00000000 --- a/karbor/wsgi/wsgi.py +++ /dev/null @@ -1,45 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Karbor OS API WSGI application.""" - - -import sys -import warnings - -from karbor import objects - -warnings.simplefilter('once', DeprecationWarning) - -from oslo_config import cfg # noqa: E402 -from oslo_log import log as logging # noqa: E402 -from oslo_service import wsgi # noqa: E402 - -from karbor import i18n # noqa: E402 -i18n.enable_lazy() - -# Need to register global_opts -from karbor.common import config # noqa -from karbor import rpc # noqa: E402 -from karbor import version # noqa: E402 - -CONF = cfg.CONF - - -def initialize_application(): - objects.register_all() - CONF(sys.argv[1:], project='karbor', - version=version.version_string()) - logging.setup(CONF, "karbor") - - rpc.init(CONF) - return wsgi.Loader(CONF).load_app(name='osapi_karbor') diff --git a/lower-constraints.txt b/lower-constraints.txt deleted file mode 100644 index c793383b..00000000 --- a/lower-constraints.txt +++ /dev/null @@ -1,148 +0,0 @@ -abclient==0.2.3 -alabaster==0.7.10 -alembic==0.9.8 -amqp==2.2.2 -appdirs==1.4.3 -asn1crypto==0.24.0 -automaton==1.14.0 -Babel==2.3.4 -botocore==1.5.1 -cachetools==2.0.1 -certifi==2018.1.18 -cffi==1.11.5 -chardet==3.0.4 -cliff==2.11.0 -cmd2==0.8.1 -contextlib2==0.5.5 -coverage==4.0 -croniter==0.3.4 -cryptography==2.1.4 -debtcollector==1.19.0 -decorator==4.2.1 -deprecation==2.0 -docutils==0.14 -dogpile.cache==0.6.5 -dulwich==0.19.0 -enum-compat==0.0.2 -eventlet==0.18.2 -extras==1.0.0 -fasteners==0.14.1 -fixtures==3.0.0 -futurist==1.8.0 -google-auth==1.4.1 -greenlet==0.4.10 -icalendar==3.10 -idna==2.6 -imagesize==1.0.0 -ipaddress==1.0.19 -iso8601==0.1.12 -Jinja2==2.10 -jmespath==0.9.3 -jsonpatch==1.21 -jsonpointer==2.0 -jsonschema==2.6.0 -keystoneauth1==3.4.0 -keystonemiddleware==4.18.0 -kombu==4.1.0 -kubernetes==5.0.0 -linecache2==1.0.0 -Mako==1.0.7 -MarkupSafe==1.0 -mock==2.0.0 -monotonic==1.4 -mox3==0.25.0 -msgpack==0.5.6 -munch==2.2.0 -netaddr==0.7.19 -netifaces==0.10.6 -networkx==1.11 -oauthlib==2.0.6 -openstackdocstheme==2.0.0 -openstacksdk==0.12.0 -os-api-ref==1.4.0 -os-client-config==1.29.0 -os-service-types==1.2.0 -osc-lib==1.10.0 -oslo.cache==1.29.0 -oslo.concurrency==3.26.0 -oslo.config==5.2.0 -oslo.context==2.19.2 -oslo.db==4.27.0 -oslo.i18n==3.15.3 -oslo.log==3.36.0 -oslo.messaging==5.29.0 -oslo.middleware==3.31.0 -oslo.policy==1.30.0 -oslo.serialization==2.18.0 -oslo.service==1.24.0 -oslo.upgradecheck==0.1.0 -oslo.utils==3.36.0 -oslo.versionedobjects==1.31.2 -oslotest==3.2.0 -packaging==17.1 -Paste==2.0.2 -PasteDeploy==1.5.0 -pbr==2.0.0 -pika-pool==0.1.3 -pika==0.10.0 -prettytable==0.7.2 -pyasn1-modules==0.2.1 -pyasn1==0.4.2 
-pycadf==2.7.0 -pycparser==2.18 -Pygments==2.2.0 -pyinotify==0.9.6 -pyOpenSSL==17.5.0 -pyparsing==2.2.0 -pyperclip==1.6.0 -python-cinderclient==3.3.0 -python-dateutil==2.7.0 -python-editor==1.0.3 -python-freezerclient==1.3.0 -python-glanceclient==2.8.0 -python-karborclient==0.6.0 -python-keystoneclient==3.15.0 -python-manilaclient==1.16.0 -python-mimeparse==1.6.0 -python-mistralclient==3.3.0 -python-neutronclient==6.7.0 -python-novaclient==9.1.0 -python-subunit==1.0.0 -python-swiftclient==3.2.0 -python-troveclient==2.2.0 -pytz==2018.3 -PyYAML==3.12 -reno==2.5.0 -repoze.lru==0.7 -requests-oauthlib==0.8.0 -requests==2.14.2 -requestsexceptions==1.4.0 -rfc3986==1.1.0 -Routes==2.3.1 -rsa==3.4.2 -simplejson==3.13.2 -six==1.10.0 -snowballstemmer==1.2.1 -Sphinx==2.0.0 -sphinxcontrib-apidoc==0.2.0 -sphinxcontrib-websupport==1.0.1 -sqlalchemy-migrate==0.11.0 -SQLAlchemy==1.0.10 -sqlparse==0.2.4 -statsd==3.2.2 -stestr==2.0.0 -stevedore==1.20.0 -taskflow==2.16.0 -Tempita==0.5.2 -tenacity==4.9.0 -testresources==2.0.1 -testscenarios==0.4 -testtools==2.2.0 -traceback2==1.4.0 -unittest2==1.1.0 -urllib3==1.22 -vine==1.1.4 -warlock==1.3.1 -WebOb==1.7.1 -websocket-client==0.47.0 -wrapt==1.10.11 diff --git a/releasenotes/notes/add-upgrade-check-framework-7e4f4c1b31f15272.yaml b/releasenotes/notes/add-upgrade-check-framework-7e4f4c1b31f15272.yaml deleted file mode 100644 index de82be59..00000000 --- a/releasenotes/notes/add-upgrade-check-framework-7e4f4c1b31f15272.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -prelude: > - Added new tool ``karbor-status upgrade check``. -features: - - | - New framework for ``karbor-status upgrade check`` command is added. - This framework allows adding various checks which can be run before a - Karbor upgrade to ensure if the upgrade can be performed safely. -upgrade: - - | - Operator can now use new CLI tool ``karbor-status upgrade check`` - to check if Karbor deployment can be safely upgraded from - N-1 to N release. diff --git a/releasenotes/notes/added-reno-releasenotes-ae36507a78246a50.yaml b/releasenotes/notes/added-reno-releasenotes-ae36507a78246a50.yaml deleted file mode 100644 index 93f21926..00000000 --- a/releasenotes/notes/added-reno-releasenotes-ae36507a78246a50.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - Started using Reno for release notes - diff --git a/releasenotes/notes/checkpoint-status-reset-d714b4a04da2f44d.yaml b/releasenotes/notes/checkpoint-status-reset-d714b4a04da2f44d.yaml deleted file mode 100644 index aee4633f..00000000 --- a/releasenotes/notes/checkpoint-status-reset-d714b4a04da2f44d.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added support for checkpoint state reset by admin and owner. diff --git a/releasenotes/notes/checkpoints-listing-with-all-tenants-d7b0d1a149cb690d.yaml b/releasenotes/notes/checkpoints-listing-with-all-tenants-d7b0d1a149cb690d.yaml deleted file mode 100644 index fe08e3b3..00000000 --- a/releasenotes/notes/checkpoints-listing-with-all-tenants-d7b0d1a149cb690d.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Add support for listing checkpoints by admin with all_tenants. diff --git a/releasenotes/notes/drop-py-2-7-84b4daca6e383f97.yaml b/releasenotes/notes/drop-py-2-7-84b4daca6e383f97.yaml deleted file mode 100644 index 5d9e74d8..00000000 --- a/releasenotes/notes/drop-py-2-7-84b4daca6e383f97.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - Python 2.7 support has been dropped. Last release of Karbor - to support python 2.7 is OpenStack Train. 
The minimum version of Python now - supported by Karbor is Python 3.6. diff --git a/releasenotes/notes/fix-checkpoint-list-c0435fcbdf26858b.yaml b/releasenotes/notes/fix-checkpoint-list-c0435fcbdf26858b.yaml deleted file mode 100644 index 587fe648..00000000 --- a/releasenotes/notes/fix-checkpoint-list-c0435fcbdf26858b.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fix checkpoint listing bug, which caused results to be not filtered - correctly according to set filters. diff --git a/releasenotes/notes/ical-rfc24445-b98313a8c3eefb62.yaml b/releasenotes/notes/ical-rfc24445-b98313a8c3eefb62.yaml deleted file mode 100644 index e1860d6a..00000000 --- a/releasenotes/notes/ical-rfc24445-b98313a8c3eefb62.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added RFC2445 (iCal) format for time triggers. diff --git a/releasenotes/notes/protection-plugin-api-063fd84b1f37d8e2.yaml b/releasenotes/notes/protection-plugin-api-063fd84b1f37d8e2.yaml deleted file mode 100644 index b030e655..00000000 --- a/releasenotes/notes/protection-plugin-api-063fd84b1f37d8e2.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - Revised Protection Plugin interface which now supports multiple hooks for - each operation. Hooks dictate when specific parts of the protection plugin - code runs in relation to the operation phase and other resource's state. -deprecations: - - | - Old Protection Plugin interface and BaseProtectionPlugin are deprecated. diff --git a/releasenotes/notes/protection-plugins-adjust-d228139bd2f19765.yaml b/releasenotes/notes/protection-plugins-adjust-d228139bd2f19765.yaml deleted file mode 100644 index 5e1e0e89..00000000 --- a/releasenotes/notes/protection-plugins-adjust-d228139bd2f19765.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - Revised Cinder protection plugin, which takes a snapshot, backs up from the - snapshot, and deletes the snapshot afterwards. - - | - Revised Glance protection plugin, which uploads the glance image in chunks - to the bank. diff --git a/releasenotes/notes/restore-auth-79cd504bc0cc3712.yaml b/releasenotes/notes/restore-auth-79cd504bc0cc3712.yaml deleted file mode 100644 index 56efaf6b..00000000 --- a/releasenotes/notes/restore-auth-79cd504bc0cc3712.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - | - Restore auth parameters now reside in restore_auth instead of restore - parameters. diff --git a/releasenotes/notes/s3-bank-plugin-b55ca44739d492b0.yaml b/releasenotes/notes/s3-bank-plugin-b55ca44739d492b0.yaml deleted file mode 100755 index 5c000ee0..00000000 --- a/releasenotes/notes/s3-bank-plugin-b55ca44739d492b0.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Add support for using S3 comptatible storage as bank plugin. diff --git a/releasenotes/notes/use-oslo-config-generator-f2a9be9e71d90b1f.yaml b/releasenotes/notes/use-oslo-config-generator-f2a9be9e71d90b1f.yaml deleted file mode 100644 index 320e95fe..00000000 --- a/releasenotes/notes/use-oslo-config-generator-f2a9be9e71d90b1f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - oslo-config-generator is now used to generate a - karbor.conf.sample file diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 77bbf020..00000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,254 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -# templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'karbor' -copyright = u'2016, Karbor Developers' - -# Release notes are version independent. -# The short X.Y version. -version = '' -# The full version, including alpha/beta/rc tags. -release = '' - -# openstackdocstheme options -repository_name = 'openstack/karbor' -bug_project = project.lower() -bug_tag = '' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. 
-# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'KarborReleaseNotestdoc' - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). - -latex_documents = [ - ('index', 'Karbor.tex', u'Karbor Release Notes Documentation', - u'Karbor developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). 
-man_pages = [ - ('index', 'Karbor', - u'Karbor Release Notes Documentation', - [u'Karbor developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'Karbor', - u'Karbor Release Notes Documentation', - u'Karbor developers', 'Karbor', - 'One line description of project.', 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index ddc2c595..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -Welcome to Karbor Release Notes documentation! -============================================== - -Contents -======== - -.. toctree:: - :maxdepth: 1 - - unreleased - train - stein - rocky - queens - pike - newton - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po deleted file mode 100644 index 39e5efd7..00000000 --- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,163 +0,0 @@ -# Andi Chandler , 2017. #zanata -# Andi Chandler , 2018. #zanata -# Andi Chandler , 2019. #zanata -msgid "" -msgstr "" -"Project-Id-Version: karbor\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2020-03-19 09:58+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2019-12-21 02:33+0000\n" -"Last-Translator: Andi Chandler \n" -"Language-Team: English (United Kingdom)\n" -"Language: en_GB\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "0.1.0" -msgstr "0.1.0" - -msgid "0.2.0" -msgstr "0.2.0" - -msgid "0.4.0" -msgstr "0.4.0" - -msgid "1.2.0" -msgstr "1.2.0" - -msgid ":ref:`genindex`" -msgstr ":ref:`genindex`" - -msgid ":ref:`search`" -msgstr ":ref:`search`" - -msgid "Add support for listing checkpoints by admin with all_tenants." -msgstr "Add support for listing checkpoints by admin with all_tenants." - -msgid "Add support for using S3 comptatible storage as bank plugin." -msgstr "Add support for using S3 compatible storage as bank plugin." - -msgid "Added RFC2445 (iCal) format for time triggers." -msgstr "Added RFC2445 (iCal) format for time triggers." - -msgid "Added new tool ``karbor-status upgrade check``." -msgstr "Added new tool ``karbor-status upgrade check``." - -msgid "Added support for checkpoint state reset by admin and owner." -msgstr "Added support for checkpoint state reset by admin and owner." 
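[The strings above translate Karbor's release notes, including the RFC2445 (iCal) format for time triggers. As a rough, hypothetical illustration of evaluating such a recurrence pattern — using the icalendar and python-dateutil libraries pinned in this repository's requirements, not Karbor's actual calendar_time.ICal code:

# Hedged sketch: parse an RFC2445 VEVENT and compute its next occurrence.
# ICAL_PATTERN is an example value invented here, not one shipped by Karbor.
from datetime import datetime

from dateutil import rrule
from icalendar import Calendar

ICAL_PATTERN = b"""BEGIN:VCALENDAR
BEGIN:VEVENT
DTSTART:20200101T010000
RRULE:FREQ=WEEKLY;BYDAY=MO,TU
END:VEVENT
END:VCALENDAR"""

event = Calendar.from_ical(ICAL_PATTERN).walk('VEVENT')[0]
rule = rrule.rrulestr(event['RRULE'].to_ical().decode(),
                      dtstart=event['DTSTART'].dt)
print(rule.after(datetime.now()))  # next time such a trigger would fire
]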
- -msgid "Bug Fixes" -msgstr "Bug Fixes" - -msgid "Contents" -msgstr "Contents" - -msgid "Current Series Release Notes" -msgstr "Current Series Release Notes" - -msgid "Deprecation Notes" -msgstr "Deprecation Notes" - -msgid "" -"Fix checkpoint listing bug, which caused results to be not filtered " -"correctly according to set filters." -msgstr "" -"Fix checkpoint listing bug, which caused results to be not filtered " -"correctly according to set filters." - -msgid "Indices and tables" -msgstr "Indices and tables" - -msgid "New Features" -msgstr "New Features" - -msgid "" -"New framework for ``karbor-status upgrade check`` command is added. This " -"framework allows adding various checks which can be run before a Karbor " -"upgrade to ensure if the upgrade can be performed safely." -msgstr "" -"New framework for ``karbor-status upgrade check`` command is added. This " -"framework allows adding various checks which can be run before a Karbor " -"upgrade to ensure if the upgrade can be performed safely." - -msgid "Newton Series Release Notes" -msgstr "Newton Series Release Notes" - -msgid "" -"Old Protection Plugin interface and BaseProtectionPlugin are deprecated." -msgstr "" -"Old Protection Plugin interface and BaseProtectionPlugin are deprecated." - -msgid "" -"Operator can now use new CLI tool ``karbor-status upgrade check`` to check " -"if Karbor deployment can be safely upgraded from N-1 to N release." -msgstr "" -"Operator can now use new CLI tool ``karbor-status upgrade check`` to check " -"if Karbor deployment can be safely upgraded from N-1 to N release." - -msgid "Other Notes" -msgstr "Other Notes" - -msgid "Pike Series Release Notes" -msgstr "Pike Series Release Notes" - -msgid "Prelude" -msgstr "Prelude" - -msgid "Queens Series Release Notes" -msgstr "Queens Series Release Notes" - -msgid "" -"Restore auth parameters now reside in restore_auth instead of restore " -"parameters." -msgstr "" -"Restore auth parameters now reside in restore_auth instead of restore " -"parameters." - -msgid "" -"Revised Cinder protection plugin, which takes a snapshot, backs up from the " -"snapshot, and deletes the snapshot afterwards." -msgstr "" -"Revised Cinder protection plugin, which takes a snapshot, backs up from the " -"snapshot, and deletes the snapshot afterwards." - -msgid "" -"Revised Glance protection plugin, which uploads the glance image in chunks " -"to the bank." -msgstr "" -"Revised Glance protection plugin, which uploads the glance image in chunks " -"to the bank." - -msgid "" -"Revised Protection Plugin interface which now supports multiple hooks for " -"each operation. Hooks dictate when specific parts of the protection plugin " -"code runs in relation to the operation phase and other resource's state." -msgstr "" -"Revised Protection Plugin interface which now supports multiple hooks for " -"each operation. Hooks dictate when specific parts of the protection plugin " -"code runs in relation to the operation phase and other resource's state." - -msgid "Rocky Series Release Notes" -msgstr "Rocky Series Release Notes" - -msgid "Started using Reno for release notes" -msgstr "Started using Reno for release notes" - -msgid "Stein Series Release Notes" -msgstr "Stein Series Release Notes" - -msgid "Train Series Release Notes" -msgstr "Train Series Release Notes" - -msgid "Upgrade Notes" -msgstr "Upgrade Notes" - -msgid "Welcome to Karbor Release Notes documentation!" -msgstr "Welcome to Karbor Release Notes documentation!" 
- -msgid "oslo-config-generator is now used to generate a karbor.conf.sample file" -msgstr "" -"oslo-config-generator is now used to generate a karbor.conf.sample file" diff --git a/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po deleted file mode 100644 index c07b6fcf..00000000 --- a/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,144 +0,0 @@ -# SEOKJAE BARK , 2017. #zanata -# Hongjae Kim , 2019. #zanata -msgid "" -msgstr "" -"Project-Id-Version: karbor\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2019-11-04 11:06+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2019-11-02 08:11+0000\n" -"Last-Translator: Hongjae Kim \n" -"Language-Team: Korean (South Korea)\n" -"Language: ko_KR\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=1; plural=0\n" - -msgid "0.1.0" -msgstr "0.1.0" - -msgid "0.2.0" -msgstr "0.2.0" - -msgid "0.4.0" -msgstr "0.4.0" - -msgid "1.2.0" -msgstr "1.2.0" - -msgid ":ref:`genindex`" -msgstr ":ref:`genindex`" - -msgid ":ref:`search`" -msgstr ":ref:`search`" - -msgid "Add support for listing checkpoints by admin with all_tenants." -msgstr "all_tenants로 관리자가 체크포인트를 나열하기 위한 지원 추가." - -msgid "Add support for using S3 comptatible storage as bank plugin." -msgstr "S3 호환 스토리지를 은행 플러그인으로 사용하기 위한 지원 추가." - -msgid "Added RFC2445 (iCal) format for time triggers." -msgstr "시간 트리거에 대한 RFC2445(iCal) 형식 추가." - -msgid "Added new tool ``karbor-status upgrade check``." -msgstr "새로운 도구 \"카보르 상태 업그레이드 점검\" 추가." - -msgid "Added support for checkpoint state reset by admin and owner." -msgstr "관리자 및 소유자의 체크포인트 상태 재설정에 대한 지원 추가." - -msgid "Bug Fixes" -msgstr "버그 고침" - -msgid "Contents" -msgstr "내용" - -msgid "Current Series Release Notes" -msgstr "최신 시리즈에 대한 릴리즈 노트" - -msgid "Deprecation Notes" -msgstr "감가 상각" - -msgid "" -"Fix checkpoint listing bug, which caused results to be not filtered " -"correctly according to set filters." -msgstr "" -"설정된 필터에 따라 결과가 올바르게 필터링되지 않는 체크포인트 목록 버그를 수" -"정하십시오." - -msgid "Indices and tables" -msgstr "인덱스 및 테이블" - -msgid "New Features" -msgstr "새로운 기능" - -msgid "" -"New framework for ``karbor-status upgrade check`` command is added. This " -"framework allows adding various checks which can be run before a Karbor " -"upgrade to ensure if the upgrade can be performed safely." -msgstr "" -"``karbor-status upgrade check\" 명령의 새로운 프레임워크가 추가되었다. 이 프" -"레임워크는 업그레이드가 안전하게 수행될 수 있는지 확인하기 위해 카르보 업그레" -"이드 전에 실행할 수 있는 다양한 검사를 추가할 수 있다." - -msgid "Newton Series Release Notes" -msgstr "Newton 시리즈에 대한 릴리즈 노트" - -msgid "" -"Old Protection Plugin interface and BaseProtectionPlugin are deprecated." -msgstr "" -"이전 보호 플러그인 인터페이스와 BaseProtectionPlugin은 더 이상 사용되지 않는" -"다." - -msgid "" -"Operator can now use new CLI tool ``karbor-status upgrade check`` to check " -"if Karbor deployment can be safely upgraded from N-1 to N release." -msgstr "" -"운영자는 이제 새로운 CLI 도구 \"karbor-status upgrade check\"를 사용하여 카보" -"르 배치를 N-1에서 N 릴리스로 안전하게 업그레이드할 수 있는지 확인할 수 있다." - -msgid "Other Notes" -msgstr "기타 노트" - -msgid "Prelude" -msgstr "서곡" - -msgid "" -"Restore auth parameters now reside in restore_auth instead of restore " -"parameters." -msgstr "" -"이제 복원 인증 매개 변수가 복원 매개 변수 대신 restore_auth에 있는 경우." - -msgid "" -"Revised Cinder protection plugin, which takes a snapshot, backs up from the " -"snapshot, and deletes the snapshot afterwards." 
-msgstr "" -"스냅샷을 생성한 후 스냅샷에서 백업하고 이후에 스냅샷을 삭제하는 수정 Cinder " -"보호 플러그인" - -msgid "" -"Revised Glance protection plugin, which uploads the glance image in chunks " -"to the bank." -msgstr "눈금 이미지를 청크로 은행에 업로드하는 수정 글랜스 보호 플러그인." - -msgid "" -"Revised Protection Plugin interface which now supports multiple hooks for " -"each operation. Hooks dictate when specific parts of the protection plugin " -"code runs in relation to the operation phase and other resource's state." -msgstr "" -"이제 각 작업에 대해 여러 후크를 지원하는 수정된 보호 플러그인 인터페이스 후크" -"는 보호 플러그인 코드의 특정 부분이 작동 단계 및 다른 리소스의 상태와 관련하" -"여 실행될 때 지시한다." - -msgid "Started using Reno for release notes" -msgstr "릴리즈 노트 관리를 위한 Reno사용을 시작합니다" - -msgid "Upgrade Notes" -msgstr "노트 업그레이드" - -msgid "Welcome to Karbor Release Notes documentation!" -msgstr "Karbor릴리즈 노트 문서에 오신 것을 환영합니다!" - -msgid "oslo-config-generator is now used to generate a karbor.conf.sample file" -msgstr "oslo-config-generator는 karbor conf sample file을 만드는데 사용됩니다" diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst deleted file mode 100644 index be218598..00000000 --- a/releasenotes/source/newton.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================ - Newton Series Release Notes -============================ - -.. release-notes:: - :branch: origin/stable/newton diff --git a/releasenotes/source/pike.rst b/releasenotes/source/pike.rst deleted file mode 100644 index e43bfc0c..00000000 --- a/releasenotes/source/pike.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Pike Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/pike diff --git a/releasenotes/source/queens.rst b/releasenotes/source/queens.rst deleted file mode 100644 index 36ac6160..00000000 --- a/releasenotes/source/queens.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Queens Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/queens diff --git a/releasenotes/source/rocky.rst b/releasenotes/source/rocky.rst deleted file mode 100644 index 40dd517b..00000000 --- a/releasenotes/source/rocky.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Rocky Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/rocky diff --git a/releasenotes/source/stein.rst b/releasenotes/source/stein.rst deleted file mode 100644 index efaceb66..00000000 --- a/releasenotes/source/stein.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Stein Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/stein diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst deleted file mode 100644 index 58390039..00000000 --- a/releasenotes/source/train.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================== -Train Series Release Notes -========================== - -.. release-notes:: - :branch: stable/train diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index 875030f9..00000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================ -Current Series Release Notes -============================ - -.. 
release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 50a82f4b..00000000 --- a/requirements.txt +++ /dev/null @@ -1,49 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -botocore>=1.5.1 # Apache-2.0 -pbr!=2.1.0,>=2.0.0 # Apache-2.0 -Babel!=2.4.0,>=2.3.4 # BSD -croniter>=0.3.4 # MIT License -eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT -greenlet>=0.4.10 # MIT -icalendar>=3.10 # BSD -jsonschema>=2.6.0 # MIT -keystoneauth1>=3.4.0 # Apache-2.0 -keystonemiddleware>=4.18.0 # Apache-2.0 -kubernetes>=5.0.0 # Apache-2.0 -python-manilaclient>=1.16.0 # Apache-2.0 -oslo.config>=5.2.0 # Apache-2.0 -oslo.concurrency>=3.26.0 # Apache-2.0 -oslo.context>=2.19.2 # Apache-2.0 -oslo.db>=4.27.0 # Apache-2.0 -oslo.log>=3.36.0 # Apache-2.0 -oslo.messaging>=5.29.0 # Apache-2.0 -oslo.middleware>=3.31.0 # Apache-2.0 -oslo.policy>=1.30.0 # Apache-2.0 -oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 -oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 -oslo.upgradecheck>=0.1.0 # Apache-2.0 -oslo.versionedobjects>=1.31.2 # Apache-2.0 -Paste>=2.0.2 # MIT -PasteDeploy>=1.5.0 # MIT -python-freezerclient>=1.3.0 # Apache-2.0 -python-glanceclient>=2.8.0 # Apache-2.0 -python-novaclient>=9.1.0 # Apache-2.0 -python-cinderclient>=3.3.0 # Apache-2.0 -python-troveclient>=2.2.0 # Apache-2.0 -requests>=2.14.2 # Apache-2.0 -Routes>=2.3.1 # MIT -python-neutronclient>=6.7.0 # Apache-2.0 -six>=1.10.0 # MIT -SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT -sqlalchemy-migrate>=0.11.0 # Apache-2.0 -stevedore>=1.20.0 # Apache-2.0 -taskflow>=2.16.0 # Apache-2.0 -WebOb>=1.7.1 # MIT -oslo.i18n>=3.15.3 # Apache-2.0 -python-swiftclient>=3.2.0 # Apache-2.0 -python-karborclient>=0.6.0 # Apache-2.0 -abclient>=0.2.3 # Apache-2.0 -futurist>=1.8.0 # Apache-2.0 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index d6db7848..00000000 --- a/setup.cfg +++ /dev/null @@ -1,96 +0,0 @@ -[metadata] -name = karbor -summary = Application Data Protection as a Service for OpenStack -description-file = - README.rst -author = OpenStack -author-email = openstack-discuss@lists.openstack.org -home-page = https://docs.openstack.org/karbor/latest/ -python-requires = >=3.6 -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: Implementation :: CPython - Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.6 - Programming Language :: Python :: 3.7 - -[files] -packages = - karbor -data_files = - etc/karbor = etc/api-paste.ini - etc/karbor/providers.d = etc/providers.d/* - -[entry_points] -console_scripts = - karbor-api = karbor.cmd.api:main - karbor-manage = karbor.cmd.manage:main - karbor-operationengine = karbor.cmd.operationengine:main - karbor-protection = karbor.cmd.protection:main - karbor-status = karbor.cmd.status:main -oslo.config.opts = - karbor.common.opts = karbor.common.opts:list_opts -oslo.policy.enforcer = - karbor = karbor.policy:get_enforcer -oslo.policy.policies = - # The sample policies will be ordered by entry point and then by list - # returned from that entry point. 
If more control is desired split out each - # list_rules method into a separate entry point rather than using the - # aggregate method. - karbor = karbor.policies:list_rules -wsgi_scripts = - karbor-wsgi = karbor.wsgi.wsgi:initialize_application -karbor.database.migration_backend = - sqlalchemy = oslo_db.sqlalchemy.migration -karbor.protections = - karbor-swift-bank-plugin = karbor.services.protection.bank_plugins.swift_bank_plugin:SwiftBankPlugin - karbor-fs-bank-plugin = karbor.services.protection.bank_plugins.file_system_bank_plugin:FileSystemBankPlugin - karbor-s3-bank-plugin = karbor.services.protection.bank_plugins.s3_bank_plugin:S3BankPlugin - karbor-volume-freezer-plugin = karbor.services.protection.protection_plugins.volume.volume_freezer_plugin:FreezerProtectionPlugin - karbor-volume-protection-plugin = karbor.services.protection.protection_plugins.volume.cinder_protection_plugin:CinderBackupProtectionPlugin - karbor-volume-snapshot-plugin = karbor.services.protection.protection_plugins.volume.volume_snapshot_plugin:VolumeSnapshotProtectionPlugin - karbor-volume-glance-plugin = karbor.services.protection.protection_plugins.volume.volume_glance_plugin:VolumeGlanceProtectionPlugin - karbor-image-protection-plugin = karbor.services.protection.protection_plugins.image.image_protection_plugin:GlanceProtectionPlugin - karbor-server-protection-plugin = karbor.services.protection.protection_plugins.server.nova_protection_plugin:NovaProtectionPlugin - karbor-share-protection-plugin = karbor.services.protection.protection_plugins.share.share_snapshot_plugin:ManilaSnapshotProtectionPlugin - karbor-noop-protection-plugin = karbor.services.protection.protection_plugins.noop_plugin:NoopProtectionPlugin - karbor-network-protection-plugin = karbor.services.protection.protection_plugins.network.neutron_protection_plugin:NeutronProtectionPlugin - karbor-database-protection-plugin = karbor.services.protection.protection_plugins.database.database_backup_plugin:DatabaseBackupProtectionPlugin - karbor-pod-protection-plugin = karbor.services.protection.protection_plugins.pod.pod_protection_plugin:PodProtectionPlugin -karbor.provider = - provider-registry = karbor.services.protection.provider:ProviderRegistry -karbor.protectables = - project = karbor.services.protection.protectable_plugins.project:ProjectProtectablePlugin - server = karbor.services.protection.protectable_plugins.server:ServerProtectablePlugin - volume = karbor.services.protection.protectable_plugins.volume:VolumeProtectablePlugin - image = karbor.services.protection.protectable_plugins.image:ImageProtectablePlugin - share = karbor.services.protection.protectable_plugins.share:ShareProtectablePlugin - network = karbor.services.protection.protectable_plugins.network:NetworkProtectablePlugin - database = karbor.services.protection.protectable_plugins.database:DatabaseInstanceProtectablePlugin - pod = karbor.services.protection.protectable_plugins.pod:K8sPodProtectablePlugin -karbor.operationengine.engine.timetrigger.time_format = - crontab = karbor.services.operationengine.engine.triggers.timetrigger.timeformats.crontab_time:Crontab - calendar = karbor.services.operationengine.engine.triggers.timetrigger.timeformats.calendar_time:ICal -karbor.operationengine.engine.executor = - thread_pool = karbor.services.operationengine.engine.executors.thread_pool_executor:ThreadPoolExecutor - green_thread = karbor.services.operationengine.engine.executors.green_thread_executor:GreenThreadExecutor - -[compile_catalog] -directory = karbor/locale -domain 
= karbor - -[update_catalog] -domain = karbor -output_dir = karbor/locale -input_file = karbor/locale/karbor.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = karbor/locale/karbor.pot diff --git a/setup.py b/setup.py deleted file mode 100644 index cd35c3c3..00000000 --- a/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import setuptools - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index bb732247..00000000 --- a/test-requirements.txt +++ /dev/null @@ -1,31 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -hacking!=3.0.1,<3.1.0 # Apache-2.0 -botocore>=1.5.1 # Apache-2.0 -coverage!=4.4,>=4.0 # Apache-2.0 -croniter>=0.3.4 # MIT License -python-subunit>=1.0.0 # Apache-2.0/BSD -oslotest>=3.2.0 # Apache-2.0 -stestr>=2.0.0 # Apache-2.0 -taskflow>=2.16.0 # Apache-2.0 -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=2.2.0 # MIT -kubernetes>=5.0.0 # Apache-2.0 -python-swiftclient>=3.2.0 # Apache-2.0 -python-glanceclient>=2.8.0 # Apache-2.0 -python-novaclient>=9.1.0 # Apache-2.0 -python-cinderclient>=3.3.0 # Apache-2.0 -python-freezerclient>=1.3.0 # Apache-2.0 -python-karborclient>=0.6.0 # Apache-2.0 -python-neutronclient>=6.7.0 # Apache-2.0 -python-troveclient>=2.2.0 # Apache-2.0 -python-manilaclient>=1.16.0 # Apache-2.0 - -# Documentation -sphinx>=2.0.0,!=2.1.0 # BSD -reno>=2.5.0 # Apache-2.0 -openstackdocstheme>=2.0.0 # Apache-2.0 -os-api-ref>=1.4.0 # Apache-2.0 -sphinxcontrib-apidoc>=0.2.0 # BSD diff --git a/tools/install_venv.py b/tools/install_venv.py deleted file mode 100644 index bced3431..00000000 --- a/tools/install_venv.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Copyright 2010 OpenStack Foundation -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
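[The entry points declared in the setup.cfg above are how Karbor's bank, protectable, and protection plugins are discovered at runtime. As a hedged sketch — standard stevedore usage (stevedore is pinned in requirements.txt), not Karbor's own provider-loading code — resolving one of those plugins looks roughly like:

from stevedore import driver

# Resolve the Swift bank plugin registered under ``karbor.protections``
# in the setup.cfg shown above; invoke_on_load=False returns the class
# without instantiating it (a real caller would pass constructor args).
mgr = driver.DriverManager(
    namespace='karbor.protections',
    name='karbor-swift-bank-plugin',
    invoke_on_load=False,
)
print(mgr.driver)  # -> the SwiftBankPlugin class
]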
- -"""Installation script for karbor's development virtualenv.""" - -from __future__ import print_function - -import os -import sys - -import install_venv_common as install_venv - - -def print_help(): - help = """ - Karbor development environment setup is complete. - - Karbor development uses virtualenv to track and manage Python dependencies - while in development and testing. - - To activate the Karbor virtualenv for the extent of your current shell - session you can run: - - $ . .venv/bin/activate - - Or, if you prefer, you can run commands in the virtualenv on a case by case - basis by running: - - $ tools/with_venv.sh - - Also, make test will automatically use the virtualenv. - """ - print(help) - - -def main(argv): - root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - venv = os.path.join(root, '.venv') - if os.environ.get('venv'): - venv = os.environ['venv'] - pip_requires = os.path.join(root, 'requirements.txt') - test_requires = os.path.join(root, 'test-requirements.txt') - project = 'Karbor' - py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) - install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, - py_version, project) - options = install.parse_args(argv) - install.check_python_version() - install.check_dependencies() - install.create_virtualenv(no_site_packages=options.no_site_packages) - install.install_dependencies() - print_help() - - -if __name__ == '__main__': - main(sys.argv) diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py deleted file mode 100644 index 3a21b065..00000000 --- a/tools/install_venv_common.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides methods needed by installation script for OpenStack development -virtual environments. - -Since this script is used to bootstrap a virtualenv from the system's Python -environment, it should be kept strictly compatible with Python 2.6. - -Synced in from openstack-common -""" - -from __future__ import print_function - -import optparse -import os -import subprocess -import sys - - -class InstallVenv(object): - - def __init__(self, root, venv, requirements, - test_requirements, py_version, - project): - super(InstallVenv, self).__init__() - self.root = root - self.venv = venv - self.requirements = requirements - self.test_requirements = test_requirements - self.py_version = py_version - self.project = project - - def die(self, message, *args): - print(message % args, file=sys.stderr) - sys.exit(1) - - def check_python_version(self): - if sys.version_info < (2, 6): - self.die("Need Python Version >= 2.6") - - def run_command_with_code(self, cmd, redirect_output=True, - check_exit_code=True): - """Runs a command in an out-of-process shell. - - Returns the output of that command. Working directory is self.root. 
- """ - if redirect_output: - stdout = subprocess.PIPE - else: - stdout = None - - proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) - output = proc.communicate()[0] - if check_exit_code and proc.returncode != 0: - self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) - return (output, proc.returncode) - - def run_command(self, cmd, redirect_output=True, check_exit_code=True): - return self.run_command_with_code(cmd, redirect_output, - check_exit_code)[0] - - def get_distro(self): - if (os.path.exists('/etc/fedora-release') or - os.path.exists('/etc/redhat-release')): - return Fedora( - self.root, self.venv, self.requirements, - self.test_requirements, self.py_version, self.project) - else: - return Distro( - self.root, self.venv, self.requirements, - self.test_requirements, self.py_version, self.project) - - def check_dependencies(self): - self.get_distro().install_virtualenv() - - def create_virtualenv(self, no_site_packages=True): - """Creates the virtual environment and installs PIP. - - Creates the virtual environment and installs PIP only into the - virtual environment. - """ - if not os.path.isdir(self.venv): - print('Creating venv...', end=' ') - if no_site_packages: - self.run_command(['virtualenv', '-q', '--no-site-packages', - self.venv]) - else: - self.run_command(['virtualenv', '-q', self.venv]) - print('done.') - else: - print("venv already exists...") - pass - - def pip_install(self, *args): - self.run_command(['tools/with_venv.sh', - 'pip', 'install', '--upgrade'] + list(args), - redirect_output=False) - - def install_dependencies(self): - print('Installing dependencies with pip (this can take a while)...') - - # First things first, make sure our venv has the latest pip and - # setuptools and pbr - self.pip_install('pip>=1.4') - self.pip_install('setuptools') - self.pip_install('pbr') - - self.pip_install('-r', self.requirements, '-r', self.test_requirements) - - def parse_args(self, argv): - """Parses command-line arguments.""" - parser = optparse.OptionParser() - parser.add_option('-n', '--no-site-packages', - action='store_true', - help="Do not inherit packages from global Python " - "install.") - return parser.parse_args(argv[1:])[0] - - -class Distro(InstallVenv): - - def check_cmd(self, cmd): - return bool(self.run_command(['which', cmd], - check_exit_code=False).strip()) - - def install_virtualenv(self): - if self.check_cmd('virtualenv'): - return - - if self.check_cmd('easy_install'): - print('Installing virtualenv via easy_install...', end=' ') - if self.run_command(['easy_install', 'virtualenv']): - print('Succeeded') - return - else: - print('Failed') - - self.die('ERROR: virtualenv not found.\n\n%s development' - ' requires virtualenv, please install it using your' - ' favorite package management tool' % self.project) - - -class Fedora(Distro): - """This covers all Fedora-based distributions. 
- - Includes: Fedora, RHEL, CentOS, Scientific Linux - """ - - def check_pkg(self, pkg): - return self.run_command_with_code(['rpm', '-q', pkg], - check_exit_code=False)[1] == 0 - - def install_virtualenv(self): - if self.check_cmd('virtualenv'): - return - - if not self.check_pkg('python-virtualenv'): - self.die("Please install 'python-virtualenv'.") - - super(Fedora, self).install_virtualenv() diff --git a/tools/with_venv.sh b/tools/with_venv.sh deleted file mode 100644 index 94e05c12..00000000 --- a/tools/with_venv.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -tools_path=${tools_path:-$(dirname $0)} -venv_path=${venv_path:-${tools_path}} -venv_dir=${venv_name:-/../.venv} -TOOLS=${tools_path} -VENV=${venv:-${venv_path}/${venv_dir}} -source ${VENV}/bin/activate && "$@" diff --git a/tox.ini b/tox.ini deleted file mode 100644 index d5897618..00000000 --- a/tox.ini +++ /dev/null @@ -1,91 +0,0 @@ -[tox] -minversion = 3.1.1 -envlist = py37,pypy,pep8 -skipsdist = True -ignore_basepython_conflict = True - -[testenv] -basepython = python3 -usedevelop = True -install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -U {opts} {packages} -setenv = - VIRTUAL_ENV={envdir} - OS_TEST_PATH=./karbor/tests/unit - PYTHONWARNINGS=default::DeprecationWarning - LANGUAGE=en_US - LC_ALL=en_US.utf-8 -deps = -r{toxinidir}/test-requirements.txt -whitelist_externals = sh /bin/rm -commands = - /bin/rm -f .testrepository/times.dbm - stestr run {posargs} - stestr slowest - -[testenv:fullstack] -setenv = OS_TEST_PATH=./karbor/tests/fullstack - OS_TEST_TIMEOUT=3600 -commands = - oslo-config-generator --config-file etc/oslo-config-generator/karbor.conf --output-file etc/karbor.conf - stestr --test-path=./karbor/tests/fullstack run '{posargs}' - stestr slowest - -[testenv:pep8] -commands = flake8 - -[testenv:venv] -commands = {posargs} - -[testenv:cover] -setenv = - {[testenv]setenv} - PYTHON=coverage run --source karbor --parallel-mode -commands = - stestr run {posargs} - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report - -[testenv:docs] -whitelist_externals = sh /bin/rm -commands = /bin/rm -rf doc/build - /bin/rm -rf doc/source/contributor/api - sphinx-build -W -b html doc/source doc/build/html - -[testenv:debug] -commands = oslo_debug_helper -t karbor/tests/unit {posargs} - -[testenv:api-ref] -# This environment is called from CI scripts to test and publish -# the API Ref to docs.openstack.org. -whitelist_externals = sh /bin/rm -deps = -r{toxinidir}/test-requirements.txt -commands = - /bin/rm -rf api-ref/build - sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html/ - -[testenv:releasenotes] -commands = sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[testenv:genconfig] -commands = oslo-config-generator --config-file etc/oslo-config-generator/karbor.conf - -[testenv:genpolicy] -commands = oslopolicy-sample-generator --config-file=etc/karbor-policy-generator.conf - -[flake8] -show-source = True -# W503 line break before binary operator -# W504 line break after binary operator -ignore = W503,W504 -builtins = _ -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,releasenotes - -[hacking] -import_exceptions = karbor.i18n - -[testenv:lower-constraints] -deps = - -c{toxinidir}/lower-constraints.txt - -r{toxinidir}/test-requirements.txt - -r{toxinidir}/requirements.txt
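[With the tox.ini above, day-to-day development was tox-driven. For example — standard tox invocations against the environments defined above, with output locations following the cover and genconfig sections:

    $ tox -e pep8        # flake8 style checks
    $ tox -e py37        # unit tests via stestr
    $ tox -e cover       # coverage report written to cover/
    $ tox -e genconfig   # regenerate the sample config via oslo-config-generator
]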